// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_create);

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_destroy);

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}

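/*
 * Illustrative sketch only (not part of this file's logic): alternative
 * names are normally added and removed via the rtnetlink RTM_NEWLINKPROP /
 * RTM_DELLINKPROP handlers, which hold the RTNL lock and hand over a freshly
 * allocated string.  Roughly ("altname0" is a made-up example name):
 *
 *	char *alt = kstrdup("altname0", GFP_KERNEL);
 *
 *	err = netdev_name_node_alt_create(dev, alt);	// stores 'alt' on success
 *	...
 *	err = netdev_name_node_alt_destroy(dev, "altname0");	// frees the stored copy
 *
 * On failure of the create call the caller still owns (and must free) the
 * string, mirroring how rtnetlink uses this API.
 */
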
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 *
 * BEWARE!!! Protocol handlers, mangling input packets,
 * MUST BE last in hash buckets and checking protocol handlers
 * MUST start from promiscuous ptype_all chain in net_bh.
 * It is true now, do not change it.
 * Explanation follows: if protocol handler, mangling packet, will
 * be the first on list, it is not able to sense, that packet
 * is cloned and should be copied-on-write, so that it will
 * change it and subsequent readers will get broken packet.
 *						--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

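/*
 * Illustrative sketch only (not part of this file): a module that wants a
 * copy of every received IPv4 frame could register a handler roughly like
 * the one below.  The names my_pkt_type/my_pkt_rcv are made up for the
 * example.
 *
 *	static int my_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// inspect skb; this handler owns the passed reference
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pkt_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = my_pkt_rcv,
 *	};
 *
 *	dev_add_pack(&my_pkt_type);	// in module init
 *	dev_remove_pack(&my_pkt_type);	// in module exit
 */
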
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

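/*
 * Illustrative sketch only: a protocol providing GRO/GSO callbacks registers
 * them roughly as below, modelled on what the IPv4 code does for ETH_P_IP.
 * The structure name and callback names here are placeholders.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.priority = 0,
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 *
 * Lower .priority values are placed earlier in offload_base by the loop
 * above, so they are consulted first on the receive path.
 */
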
/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

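/*
 * Illustrative sketch only: the "netdev=" kernel command-line option parsed
 * above takes up to four integers (irq, base I/O address, mem_start, mem_end)
 * followed by an interface name.  For example (all values made up):
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * records IRQ 9 and I/O base 0x300 for a device that later registers itself
 * as "eth0" and calls netdev_boot_setup_check() during probing.
 */
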
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic, OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the caller to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

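/*
 * Illustrative sketch only: a typical refcounted lookup by name (the "eth0"
 * string and surrounding code are made-up example usage):
 *
 *	struct net_device *dev;
 *
 *	dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// ... use dev; the held reference keeps it from going away ...
 *		dev_put(dev);
 *	}
 *
 * Under rcu_read_lock(), dev_get_by_name_rcu() can be used instead without
 * taking a reference, but the pointer is then only valid inside the RCU
 * read-side critical section.
 */
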
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

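/*
 * Illustrative sketch only: a caller holding a NAPI ID (for instance one
 * reported to userspace via the SO_INCOMING_NAPI_ID socket option) can map
 * it back to its device under the RCU read lock:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_napi_id(napi_id);
 *	if (dev)
 *		dev_hold(dev);	// only if dev is used after rcu_read_unlock()
 *	rcu_read_unlock();
 */
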
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

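/*
 * Illustrative sketch only: looking up an Ethernet device by MAC address
 * (the address bytes below are made up):
 *
 *	static const char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		dev_hold(dev);	// take a reference if dev must outlive the RCU section
 *	rcu_read_unlock();
 */
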
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

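/*
 * Illustrative sketch only: @mask selects which flag bits must match
 * @if_flags exactly.  For example, to find an interface that is up but is
 * not a loopback device (the caller must hold RTNL):
 *
 *	ASSERT_RTNL();
 *	dev = __dev_get_by_flags(net, IFF_UP, IFF_UP | IFF_LOOPBACK);
 */
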
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to allow
 *	sysfs to work.  We also disallow any kind of whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

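/*
 * Illustrative examples only: under the checks above, names such as "eth0",
 * "wan-uplink" or the pattern "veth%d" are accepted, while "", ".", "..",
 * "a/b", "a:b", names containing whitespace, and names that do not fit in
 * IFNAMSIZ bytes (including the trailing NUL) are all rejected.
 */
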
1154/**
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001155 * __dev_alloc_name - allocate a name for a device
1156 * @net: network namespace to allocate the device name in
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157 * @name: name format string
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001158 * @buf: scratch buffer and result name string
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 *
1160 * Passed a format string - eg "lt%d" it will try and find a suitable
Stephen Hemminger3041a062006-05-26 13:25:24 -07001161 * id. It scans list of devices to build up a free map, then chooses
1162 * the first empty slot. The caller must hold the dev_base or rtnl lock
1163 * while allocating the name and adding the device in order to avoid
1164 * duplicates.
1165 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1166 * Returns the number of the unit assigned or a negative errno code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167 */
1168
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001169static int __dev_alloc_name(struct net *net, const char *name, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170{
1171 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172 const char *p;
1173 const int max_netdevices = 8*PAGE_SIZE;
Stephen Hemmingercfcabdc2007-10-09 01:59:42 -07001174 unsigned long *inuse;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 struct net_device *d;
1176
Rasmus Villemoes93809102017-11-13 00:15:08 +01001177 if (!dev_valid_name(name))
1178 return -EINVAL;
1179
Rasmus Villemoes51f299d2017-11-13 00:15:04 +01001180 p = strchr(name, '%');
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 if (p) {
1182 /*
1183 * Verify the string as this thing may have come from
1184 * the user. There must be either one "%d" and no other "%"
1185 * characters.
1186 */
1187 if (p[1] != 'd' || strchr(p + 2, '%'))
1188 return -EINVAL;
1189
1190 /* Use one page as a bit array of possible slots */
Stephen Hemmingercfcabdc2007-10-09 01:59:42 -07001191 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 if (!inuse)
1193 return -ENOMEM;
1194
Eric W. Biederman881d9662007-09-17 11:56:21 -07001195 for_each_netdev(net, d) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 if (!sscanf(d->name, name, &i))
1197 continue;
1198 if (i < 0 || i >= max_netdevices)
1199 continue;
1200
1201 /* avoid cases where sscanf is not exact inverse of printf */
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001202 snprintf(buf, IFNAMSIZ, name, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 if (!strncmp(buf, d->name, IFNAMSIZ))
1204 set_bit(i, inuse);
1205 }
1206
1207 i = find_first_zero_bit(inuse, max_netdevices);
1208 free_page((unsigned long) inuse);
1209 }
1210
Rasmus Villemoes6224abd2017-11-13 00:15:07 +01001211 snprintf(buf, IFNAMSIZ, name, i);
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001212 if (!__dev_get_by_name(net, buf))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214
1215 /* It is possible to run out of possible slots
1216 * when the name is long and there isn't enough space left
1217 * for the digits, or if all bits are used.
1218 */
Johannes Berg029b6d12017-12-02 08:41:55 +01001219 return -ENFILE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220}
1221
Rasmus Villemoes2c88b852017-11-13 00:15:05 +01001222static int dev_alloc_name_ns(struct net *net,
1223 struct net_device *dev,
1224 const char *name)
1225{
1226 char buf[IFNAMSIZ];
1227 int ret;
1228
Rasmus Villemoesc46d7642017-11-13 00:15:06 +01001229 BUG_ON(!net);
Rasmus Villemoes2c88b852017-11-13 00:15:05 +01001230 ret = __dev_alloc_name(net, name, buf);
1231 if (ret >= 0)
1232 strlcpy(dev->name, buf, IFNAMSIZ);
1233 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234}
1235
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001236/**
1237 * dev_alloc_name - allocate a name for a device
1238 * @dev: device
1239 * @name: name format string
1240 *
1241 * Passed a format string - eg "lt%d" it will try and find a suitable
1242 * id. It scans list of devices to build up a free map, then chooses
1243 * the first empty slot. The caller must hold the dev_base or rtnl lock
1244 * while allocating the name and adding the device in order to avoid
1245 * duplicates.
1246 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1247 * Returns the number of the unit assigned or a negative errno code.
1248 */
1249
1250int dev_alloc_name(struct net_device *dev, const char *name)
1251{
Rasmus Villemoesc46d7642017-11-13 00:15:06 +01001252 return dev_alloc_name_ns(dev_net(dev), dev, name);
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001253}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001254EXPORT_SYMBOL(dev_alloc_name);
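/*
 * Editor's illustrative sketch (not part of the kernel source): a driver
 * that wants an automatically numbered interface name passes a "%d"
 * pattern, typically before registering the device. The name and error
 * label below are hypothetical.
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto out_free_netdev;
 *	// dev->name now holds e.g. "dummy0"; err is the assigned unit number
 */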
Eric W. Biedermanb267b172007-09-12 13:48:45 +02001255
Eric Dumazetbacb7e12019-10-08 14:20:34 -07001256static int dev_get_valid_name(struct net *net, struct net_device *dev,
1257 const char *name)
Gao feng828de4f2012-09-13 20:58:27 +00001258{
David S. Miller55a5ec92018-01-02 11:45:07 -05001259 BUG_ON(!net);
1260
1261 if (!dev_valid_name(name))
1262 return -EINVAL;
1263
1264 if (strchr(name, '%'))
1265 return dev_alloc_name_ns(net, dev, name);
1266 else if (__dev_get_by_name(net, name))
1267 return -EEXIST;
1268 else if (dev->name != name)
1269 strlcpy(dev->name, name, IFNAMSIZ);
1270
1271 return 0;
Octavian Purdilad9031022009-11-18 02:36:59 +00001272}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273
1274/**
1275 * dev_change_name - change name of a device
1276 * @dev: device
1277 * @newname: name (or format string) must be at least IFNAMSIZ
1278 *
1279 * Change name of a device, can pass format strings "eth%d".
1280 * for wildcarding.
1281 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07001282int dev_change_name(struct net_device *dev, const char *newname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283{
Tom Gundersen238fa362014-07-14 16:37:23 +02001284 unsigned char old_assign_type;
Herbert Xufcc5a032007-07-30 17:03:38 -07001285 char oldname[IFNAMSIZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 int err = 0;
Herbert Xufcc5a032007-07-30 17:03:38 -07001287 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001288 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289
1290 ASSERT_RTNL();
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001291 BUG_ON(!dev_net(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001293 net = dev_net(dev);
Si-Wei Liu8065a772019-04-08 19:45:27 -04001294
1295 /* Some auto-enslaved devices e.g. failover slaves are
 1296 * special, as userspace might rename the device after
 1297 * the interface has been brought up and running, because
 1298 * the kernel initiated the auto-enslavement. Allow
1299 * live name change even when these slave devices are
1300 * up and running.
1301 *
1302 * Typically, users of these auto-enslaving devices
1303 * don't actually care about slave name change, as
1304 * they are supposed to operate on master interface
1305 * directly.
1306 */
1307 if (dev->flags & IFF_UP &&
1308 likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 return -EBUSY;
1310
Ahmed S. Darwish11d60112020-06-03 16:49:44 +02001311 down_write(&devnet_rename_sem);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001312
1313 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
Ahmed S. Darwish11d60112020-06-03 16:49:44 +02001314 up_write(&devnet_rename_sem);
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001315 return 0;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001316 }
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001317
Herbert Xufcc5a032007-07-30 17:03:38 -07001318 memcpy(oldname, dev->name, IFNAMSIZ);
1319
Gao feng828de4f2012-09-13 20:58:27 +00001320 err = dev_get_valid_name(net, dev, newname);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001321 if (err < 0) {
Ahmed S. Darwish11d60112020-06-03 16:49:44 +02001322 up_write(&devnet_rename_sem);
Octavian Purdilad9031022009-11-18 02:36:59 +00001323 return err;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001324 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325
Veaceslav Falico6fe82a32014-07-17 20:33:32 +02001326 if (oldname[0] && !strchr(oldname, '%'))
1327 netdev_info(dev, "renamed from %s\n", oldname);
1328
Tom Gundersen238fa362014-07-14 16:37:23 +02001329 old_assign_type = dev->name_assign_type;
1330 dev->name_assign_type = NET_NAME_RENAMED;
1331
Herbert Xufcc5a032007-07-30 17:03:38 -07001332rollback:
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001333 ret = device_rename(&dev->dev, dev->name);
1334 if (ret) {
1335 memcpy(dev->name, oldname, IFNAMSIZ);
Tom Gundersen238fa362014-07-14 16:37:23 +02001336 dev->name_assign_type = old_assign_type;
Ahmed S. Darwish11d60112020-06-03 16:49:44 +02001337 up_write(&devnet_rename_sem);
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001338 return ret;
Stephen Hemmingerdcc99772008-05-14 22:33:38 -07001339 }
Herbert Xu7f988ea2007-07-30 16:35:46 -07001340
Ahmed S. Darwish11d60112020-06-03 16:49:44 +02001341 up_write(&devnet_rename_sem);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001342
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01001343 netdev_adjacent_rename_links(dev, oldname);
1344
Herbert Xu7f988ea2007-07-30 16:35:46 -07001345 write_lock_bh(&dev_base_lock);
Jiri Pirkoff927412019-09-30 11:48:15 +02001346 netdev_name_node_del(dev->name_node);
Eric Dumazet72c95282009-10-30 07:11:27 +00001347 write_unlock_bh(&dev_base_lock);
1348
1349 synchronize_rcu();
1350
1351 write_lock_bh(&dev_base_lock);
Jiri Pirkoff927412019-09-30 11:48:15 +02001352 netdev_name_node_add(net, dev->name_node);
Herbert Xu7f988ea2007-07-30 16:35:46 -07001353 write_unlock_bh(&dev_base_lock);
1354
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001355 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001356 ret = notifier_to_errno(ret);
1357
1358 if (ret) {
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001359 /* err >= 0 after dev_alloc_name() or stores the first errno */
1360 if (err >= 0) {
Herbert Xufcc5a032007-07-30 17:03:38 -07001361 err = ret;
Ahmed S. Darwish11d60112020-06-03 16:49:44 +02001362 down_write(&devnet_rename_sem);
Herbert Xufcc5a032007-07-30 17:03:38 -07001363 memcpy(dev->name, oldname, IFNAMSIZ);
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01001364 memcpy(oldname, newname, IFNAMSIZ);
Tom Gundersen238fa362014-07-14 16:37:23 +02001365 dev->name_assign_type = old_assign_type;
1366 old_assign_type = NET_NAME_RENAMED;
Herbert Xufcc5a032007-07-30 17:03:38 -07001367 goto rollback;
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001368 } else {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001369 pr_err("%s: name change rollback failed: %d\n",
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001370 dev->name, ret);
Herbert Xufcc5a032007-07-30 17:03:38 -07001371 }
1372 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373
1374 return err;
1375}
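/*
 * Illustrative sketch (not from the kernel tree): dev_change_name() must be
 * called with the RTNL held; a hypothetical in-kernel caller could look like
 * this. The "mgmt%d" pattern is made up.
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "mgmt%d");
 *	rtnl_unlock();
 */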
1376
1377/**
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001378 * dev_set_alias - change ifalias of a device
1379 * @dev: device
1380 * @alias: name up to IFALIASZ
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07001381 * @len: limit of bytes to copy from info
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001382 *
 1383 * Set the ifalias for a device. Returns the stored length or a negative errno.
1384 */
1385int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1386{
Florian Westphal6c557002017-10-02 23:50:05 +02001387 struct dev_ifalias *new_alias = NULL;
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001388
1389 if (len >= IFALIASZ)
1390 return -EINVAL;
1391
Florian Westphal6c557002017-10-02 23:50:05 +02001392 if (len) {
1393 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1394 if (!new_alias)
1395 return -ENOMEM;
1396
1397 memcpy(new_alias->ifalias, alias, len);
1398 new_alias->ifalias[len] = 0;
Oliver Hartkopp96ca4a2c2008-09-23 21:23:19 -07001399 }
1400
Florian Westphal6c557002017-10-02 23:50:05 +02001401 mutex_lock(&ifalias_mutex);
Paul E. McKenneye3f0d762019-09-23 15:42:28 -07001402 new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1403 mutex_is_locked(&ifalias_mutex));
Florian Westphal6c557002017-10-02 23:50:05 +02001404 mutex_unlock(&ifalias_mutex);
1405
1406 if (new_alias)
1407 kfree_rcu(new_alias, rcuhead);
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001408
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001409 return len;
1410}
Stephen Hemminger0fe554a2018-04-17 14:25:30 -07001411EXPORT_SYMBOL(dev_set_alias);
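/*
 * Illustrative sketch (not part of the kernel source): setting a
 * human-readable alias from kernel code. The alias string is hypothetical;
 * the usual path into this helper is the IFLA_IFALIAS netlink attribute.
 *
 *	const char *alias = "uplink to core switch";
 *	int ret = dev_set_alias(dev, alias, strlen(alias));
 *
 *	if (ret < 0)
 *		return ret;
 */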
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001412
Florian Westphal6c557002017-10-02 23:50:05 +02001413/**
1414 * dev_get_alias - get ifalias of a device
1415 * @dev: device
Florian Westphal20e88322017-10-04 13:56:50 +02001416 * @name: buffer to store name of ifalias
Florian Westphal6c557002017-10-02 23:50:05 +02001417 * @len: size of buffer
1418 *
 1419 * Get the ifalias for a device. The caller must make sure dev cannot go
 1420 * away, e.g. by holding the RCU read lock or a reference count on the device.
1421 */
1422int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1423{
1424 const struct dev_ifalias *alias;
1425 int ret = 0;
1426
1427 rcu_read_lock();
1428 alias = rcu_dereference(dev->ifalias);
1429 if (alias)
1430 ret = snprintf(name, len, "%s", alias->ifalias);
1431 rcu_read_unlock();
1432
1433 return ret;
1434}
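/*
 * Illustrative sketch (not part of the kernel source): reading the alias
 * into a local buffer. dev_get_alias() takes the RCU read lock itself, so
 * the caller only has to keep @dev alive.
 *
 *	char alias[IFALIASZ];
 *
 *	if (dev_get_alias(dev, alias, sizeof(alias)))
 *		pr_debug("%s alias: %s\n", dev->name, alias);
 */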
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001435
1436/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001437 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001438 * @dev: device to cause notification
1439 *
1440 * Called to indicate a device has changed features.
1441 */
1442void netdev_features_change(struct net_device *dev)
1443{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001444 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001445}
1446EXPORT_SYMBOL(netdev_features_change);
1447
1448/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 * netdev_state_change - device changes state
1450 * @dev: device to cause notification
1451 *
1452 * Called to indicate a device has changed state. This function calls
 1453 * the netdev_chain notifier chain and sends a NEWLINK message
1454 * to the routing socket.
1455 */
1456void netdev_state_change(struct net_device *dev)
1457{
1458 if (dev->flags & IFF_UP) {
David Ahern51d0c0472017-10-04 17:48:45 -07001459 struct netdev_notifier_change_info change_info = {
1460 .info.dev = dev,
1461 };
Loic Prylli54951192014-07-01 21:39:43 -07001462
David Ahern51d0c0472017-10-04 17:48:45 -07001463 call_netdevice_notifiers_info(NETDEV_CHANGE,
Loic Prylli54951192014-07-01 21:39:43 -07001464 &change_info.info);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001465 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 }
1467}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001468EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469
Amerigo Wangee89bab2012-08-09 22:14:56 +00001470/**
tcharding722c9a02017-02-09 17:56:04 +11001471 * netdev_notify_peers - notify network peers about existence of @dev
1472 * @dev: network device
Amerigo Wangee89bab2012-08-09 22:14:56 +00001473 *
1474 * Generate traffic such that interested network peers are aware of
1475 * @dev, such as by generating a gratuitous ARP. This may be used when
1476 * a device wants to inform the rest of the network about some sort of
1477 * reconfiguration such as a failover event or virtual machine
1478 * migration.
1479 */
1480void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001481{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001482 rtnl_lock();
1483 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
Vlad Yasevich37c343b2017-03-14 08:58:08 -04001484 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
Amerigo Wangee89bab2012-08-09 22:14:56 +00001485 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001486}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001487EXPORT_SYMBOL(netdev_notify_peers);
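/*
 * Illustrative sketch (not part of the kernel source): a virtual NIC driver
 * could announce itself after a live migration completes so switches relearn
 * its location. The function name below is hypothetical; note the helper
 * takes the RTNL itself, so it must not already be held.
 *
 *	static void myvnic_migration_done(struct net_device *dev)
 *	{
 *		netdev_notify_peers(dev);
 *	}
 */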
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001488
Petr Machata40c900a2018-12-06 17:05:47 +00001489static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001491 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001492 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001494 ASSERT_RTNL();
1495
Heiner Kallweitbd869242020-06-20 22:35:42 +02001496 if (!netif_device_present(dev)) {
1497 /* may be detached because parent is runtime-suspended */
1498 if (dev->dev.parent)
1499 pm_runtime_resume(dev->dev.parent);
1500 if (!netif_device_present(dev))
1501 return -ENODEV;
1502 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503
Neil Hormanca99ca12013-02-05 08:05:43 +00001504 /* Block netpoll from trying to do any rx path servicing.
1505 * If we don't do this there is a chance ndo_poll_controller
1506 * or ndo_poll may be running while we open the device
1507 */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001508 netpoll_poll_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001509
Petr Machata40c900a2018-12-06 17:05:47 +00001510 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001511 ret = notifier_to_errno(ret);
1512 if (ret)
1513 return ret;
1514
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001516
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001517 if (ops->ndo_validate_addr)
1518 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001519
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001520 if (!ret && ops->ndo_open)
1521 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
Eric W. Biederman66b55522014-03-27 15:39:03 -07001523 netpoll_poll_enable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001524
Jeff Garzikbada3392007-10-23 20:19:37 -07001525 if (ret)
1526 clear_bit(__LINK_STATE_START, &dev->state);
1527 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 dev->flags |= IFF_UP;
Patrick McHardy4417da62007-06-27 01:28:10 -07001529 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001531 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001533
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 return ret;
1535}
Patrick McHardybd380812010-02-26 06:34:53 +00001536
1537/**
1538 * dev_open - prepare an interface for use.
Petr Machata00f54e62018-12-06 17:05:36 +00001539 * @dev: device to open
1540 * @extack: netlink extended ack
Patrick McHardybd380812010-02-26 06:34:53 +00001541 *
1542 * Takes a device from down to up state. The device's private open
1543 * function is invoked and then the multicast lists are loaded. Finally
1544 * the device is moved into the up state and a %NETDEV_UP message is
1545 * sent to the netdev notifier chain.
1546 *
1547 * Calling this function on an active interface is a nop. On a failure
1548 * a negative errno code is returned.
1549 */
Petr Machata00f54e62018-12-06 17:05:36 +00001550int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
Patrick McHardybd380812010-02-26 06:34:53 +00001551{
1552 int ret;
1553
Patrick McHardybd380812010-02-26 06:34:53 +00001554 if (dev->flags & IFF_UP)
1555 return 0;
1556
Petr Machata40c900a2018-12-06 17:05:47 +00001557 ret = __dev_open(dev, extack);
Patrick McHardybd380812010-02-26 06:34:53 +00001558 if (ret < 0)
1559 return ret;
1560
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001561 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001562 call_netdevice_notifiers(NETDEV_UP, dev);
1563
1564 return ret;
1565}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001566EXPORT_SYMBOL(dev_open);
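/*
 * Illustrative sketch (not part of the kernel source): bringing an interface
 * up from kernel code under the RTNL; most in-kernel callers pass a NULL
 * extack.
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);
 *	rtnl_unlock();
 */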
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
stephen hemminger7051b882017-07-18 15:59:27 -07001568static void __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569{
Octavian Purdila44345722010-12-13 12:44:07 +00001570 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001571
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001572 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001573 might_sleep();
1574
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001575 list_for_each_entry(dev, head, close_list) {
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001576 /* Temporarily disable netpoll until the interface is down */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001577 netpoll_poll_disable(dev);
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001578
Octavian Purdila44345722010-12-13 12:44:07 +00001579 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580
Octavian Purdila44345722010-12-13 12:44:07 +00001581 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
Octavian Purdila44345722010-12-13 12:44:07 +00001583 /* Synchronize to scheduled poll. We cannot touch poll list, it
1584 * can be even on different cpu. So just clear netif_running().
1585 *
 1586 * dev->stop() will invoke napi_disable() on all of its
1587 * napi_struct instances on this device.
1588 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001589 smp_mb__after_atomic(); /* Commit netif_running(). */
Octavian Purdila44345722010-12-13 12:44:07 +00001590 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591
Octavian Purdila44345722010-12-13 12:44:07 +00001592 dev_deactivate_many(head);
1593
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001594 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001595 const struct net_device_ops *ops = dev->netdev_ops;
1596
1597 /*
1598 * Call the device specific close. This cannot fail.
1599 * Only if device is UP
1600 *
1601 * We allow it to be called even after a DETACH hot-plug
1602 * event.
1603 */
1604 if (ops->ndo_stop)
1605 ops->ndo_stop(dev);
1606
Octavian Purdila44345722010-12-13 12:44:07 +00001607 dev->flags &= ~IFF_UP;
Eric W. Biederman66b55522014-03-27 15:39:03 -07001608 netpoll_poll_enable(dev);
Octavian Purdila44345722010-12-13 12:44:07 +00001609 }
Octavian Purdila44345722010-12-13 12:44:07 +00001610}
1611
stephen hemminger7051b882017-07-18 15:59:27 -07001612static void __dev_close(struct net_device *dev)
Octavian Purdila44345722010-12-13 12:44:07 +00001613{
1614 LIST_HEAD(single);
1615
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001616 list_add(&dev->close_list, &single);
stephen hemminger7051b882017-07-18 15:59:27 -07001617 __dev_close_many(&single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001618 list_del(&single);
Octavian Purdila44345722010-12-13 12:44:07 +00001619}
1620
stephen hemminger7051b882017-07-18 15:59:27 -07001621void dev_close_many(struct list_head *head, bool unlink)
Octavian Purdila44345722010-12-13 12:44:07 +00001622{
1623 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001624
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001625 /* Remove the devices that don't need to be closed */
1626 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001627 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001628 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001629
1630 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001631
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001632 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001633 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001634 call_netdevice_notifiers(NETDEV_DOWN, dev);
David S. Miller99c4a262015-03-18 22:52:33 -04001635 if (unlink)
1636 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001637 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638}
David S. Miller99c4a262015-03-18 22:52:33 -04001639EXPORT_SYMBOL(dev_close_many);
Patrick McHardybd380812010-02-26 06:34:53 +00001640
1641/**
1642 * dev_close - shutdown an interface.
1643 * @dev: device to shutdown
1644 *
1645 * This function moves an active device into down state. A
1646 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1647 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1648 * chain.
1649 */
stephen hemminger7051b882017-07-18 15:59:27 -07001650void dev_close(struct net_device *dev)
Patrick McHardybd380812010-02-26 06:34:53 +00001651{
Eric Dumazete14a5992011-05-10 12:26:06 -07001652 if (dev->flags & IFF_UP) {
1653 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001654
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001655 list_add(&dev->close_list, &single);
David S. Miller99c4a262015-03-18 22:52:33 -04001656 dev_close_many(&single, true);
Eric Dumazete14a5992011-05-10 12:26:06 -07001657 list_del(&single);
1658 }
Patrick McHardybd380812010-02-26 06:34:53 +00001659}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001660EXPORT_SYMBOL(dev_close);
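/*
 * Illustrative sketch (not part of the kernel source): tearing a device down
 * from kernel code. dev_close() must run under the RTNL and is a no-op if
 * the device is already down.
 *
 *	rtnl_lock();
 *	dev_close(dev);
 *	rtnl_unlock();
 */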
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661
1662
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001663/**
1664 * dev_disable_lro - disable Large Receive Offload on a device
1665 * @dev: device
1666 *
1667 * Disable Large Receive Offload (LRO) on a net device. Must be
1668 * called under RTNL. This is needed if received packets may be
1669 * forwarded to another interface.
1670 */
1671void dev_disable_lro(struct net_device *dev)
1672{
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001673 struct net_device *lower_dev;
1674 struct list_head *iter;
Michal Kubeček529d0482013-11-15 06:18:50 +01001675
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001676 dev->wanted_features &= ~NETIF_F_LRO;
1677 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001678
Michał Mirosław22d59692011-04-21 12:42:15 +00001679 if (unlikely(dev->features & NETIF_F_LRO))
1680 netdev_WARN(dev, "failed to disable LRO!\n");
Michal Kubečekfbe168b2014-11-13 07:54:50 +01001681
1682 netdev_for_each_lower_dev(dev, lower_dev, iter)
1683 dev_disable_lro(lower_dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001684}
1685EXPORT_SYMBOL(dev_disable_lro);
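/*
 * Illustrative sketch (not part of the kernel source): a forwarding setup
 * path might disable LRO on a port before packets received on it can be
 * forwarded. The enslave helper shown here is hypothetical.
 *
 *	static int mybridge_add_port(struct net_device *port_dev)
 *	{
 *		ASSERT_RTNL();
 *		dev_disable_lro(port_dev);
 *		...
 *	}
 */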
1686
Michael Chan56f5aa72017-12-16 03:09:41 -05001687/**
1688 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1689 * @dev: device
1690 *
1691 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
1692 * called under RTNL. This is needed if Generic XDP is installed on
1693 * the device.
1694 */
1695static void dev_disable_gro_hw(struct net_device *dev)
1696{
1697 dev->wanted_features &= ~NETIF_F_GRO_HW;
1698 netdev_update_features(dev);
1699
1700 if (unlikely(dev->features & NETIF_F_GRO_HW))
1701 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1702}
1703
Kirill Tkhaiede27622018-03-23 19:47:19 +03001704const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1705{
1706#define N(val) \
1707 case NETDEV_##val: \
1708 return "NETDEV_" __stringify(val);
1709 switch (cmd) {
1710 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1711 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1712 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1713 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1714 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1715 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1716 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
Gal Pressman9daae9b2018-03-28 17:46:54 +03001717 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1718 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
Petr Machata15704152018-12-13 11:54:33 +00001719 N(PRE_CHANGEADDR)
Kirill Tkhai3f5ecd82018-04-26 15:18:38 +03001720 }
Kirill Tkhaiede27622018-03-23 19:47:19 +03001721#undef N
1722 return "UNKNOWN_NETDEV_EVENT";
1723}
1724EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
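/*
 * Illustrative sketch (not part of the kernel source): the helper is handy
 * for debug output inside a notifier callback. The callback name is made up.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		pr_debug("%s: %s\n", dev->name, netdev_cmd_to_name(event));
 *		return NOTIFY_DONE;
 *	}
 */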
1725
Jiri Pirko351638e2013-05-28 01:30:21 +00001726static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1727 struct net_device *dev)
1728{
David Ahern51d0c0472017-10-04 17:48:45 -07001729 struct netdev_notifier_info info = {
1730 .dev = dev,
1731 };
Jiri Pirko351638e2013-05-28 01:30:21 +00001732
Jiri Pirko351638e2013-05-28 01:30:21 +00001733 return nb->notifier_call(nb, val, &info);
1734}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001735
Jiri Pirkoafa0df52019-09-30 10:15:09 +02001736static int call_netdevice_register_notifiers(struct notifier_block *nb,
1737 struct net_device *dev)
1738{
1739 int err;
1740
1741 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1742 err = notifier_to_errno(err);
1743 if (err)
1744 return err;
1745
1746 if (!(dev->flags & IFF_UP))
1747 return 0;
1748
1749 call_netdevice_notifier(nb, NETDEV_UP, dev);
1750 return 0;
1751}
1752
1753static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1754 struct net_device *dev)
1755{
1756 if (dev->flags & IFF_UP) {
1757 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1758 dev);
1759 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1760 }
1761 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1762}
1763
1764static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1765 struct net *net)
1766{
1767 struct net_device *dev;
1768 int err;
1769
1770 for_each_netdev(net, dev) {
1771 err = call_netdevice_register_notifiers(nb, dev);
1772 if (err)
1773 goto rollback;
1774 }
1775 return 0;
1776
1777rollback:
1778 for_each_netdev_continue_reverse(net, dev)
1779 call_netdevice_unregister_notifiers(nb, dev);
1780 return err;
1781}
1782
1783static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1784 struct net *net)
1785{
1786 struct net_device *dev;
1787
1788 for_each_netdev(net, dev)
1789 call_netdevice_unregister_notifiers(nb, dev);
1790}
1791
Eric W. Biederman881d9662007-09-17 11:56:21 -07001792static int dev_boot_phase = 1;
1793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794/**
tcharding722c9a02017-02-09 17:56:04 +11001795 * register_netdevice_notifier - register a network notifier block
1796 * @nb: notifier
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 *
tcharding722c9a02017-02-09 17:56:04 +11001798 * Register a notifier to be called when network device events occur.
1799 * The notifier passed is linked into the kernel structures and must
1800 * not be reused until it has been unregistered. A negative errno code
1801 * is returned on a failure.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 *
tcharding722c9a02017-02-09 17:56:04 +11001803 * When registered all registration and up events are replayed
 1804 * to the new notifier to allow it to have a race free
1805 * view of the network device list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 */
1807
1808int register_netdevice_notifier(struct notifier_block *nb)
1809{
Eric W. Biederman881d9662007-09-17 11:56:21 -07001810 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 int err;
1812
Kirill Tkhai328fbe72018-03-29 17:03:45 +03001813 /* Close race with setup_net() and cleanup_net() */
1814 down_write(&pernet_ops_rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001816 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001817 if (err)
1818 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001819 if (dev_boot_phase)
1820 goto unlock;
1821 for_each_net(net) {
Jiri Pirkoafa0df52019-09-30 10:15:09 +02001822 err = call_netdevice_register_net_notifiers(nb, net);
1823 if (err)
1824 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001826
1827unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 rtnl_unlock();
Kirill Tkhai328fbe72018-03-29 17:03:45 +03001829 up_write(&pernet_ops_rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001831
1832rollback:
Jiri Pirkoafa0df52019-09-30 10:15:09 +02001833 for_each_net_continue_reverse(net)
1834 call_netdevice_unregister_net_notifiers(nb, net);
Herbert Xufcc5a032007-07-30 17:03:38 -07001835
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001836 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001837 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001839EXPORT_SYMBOL(register_netdevice_notifier);
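/*
 * Illustrative sketch (not part of the kernel source): a typical subsystem
 * registers one notifier block at init time. All names below are
 * hypothetical.
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	static int __init my_subsys_init(void)
 *	{
 *		return register_netdevice_notifier(&my_netdev_nb);
 *	}
 */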
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840
1841/**
tcharding722c9a02017-02-09 17:56:04 +11001842 * unregister_netdevice_notifier - unregister a network notifier block
1843 * @nb: notifier
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 *
tcharding722c9a02017-02-09 17:56:04 +11001845 * Unregister a notifier previously registered by
 1846 * register_netdevice_notifier(). The notifier is unlinked from the
1847 * kernel structures and may then be reused. A negative errno code
1848 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001849 *
tcharding722c9a02017-02-09 17:56:04 +11001850 * After unregistering unregister and down device events are synthesized
1851 * for all devices on the device list to the removed notifier to remove
1852 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 */
1854
1855int unregister_netdevice_notifier(struct notifier_block *nb)
1856{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001857 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001858 int err;
1859
Kirill Tkhai328fbe72018-03-29 17:03:45 +03001860 /* Close race with setup_net() and cleanup_net() */
1861 down_write(&pernet_ops_rwsem);
Herbert Xu9f514952006-03-25 01:24:25 -08001862 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001863 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001864 if (err)
1865 goto unlock;
1866
Jiri Pirko48b3a132020-01-25 12:17:06 +01001867 for_each_net(net)
1868 call_netdevice_unregister_net_notifiers(nb, net);
1869
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001870unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001871 rtnl_unlock();
Kirill Tkhai328fbe72018-03-29 17:03:45 +03001872 up_write(&pernet_ops_rwsem);
Herbert Xu9f514952006-03-25 01:24:25 -08001873 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001875EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876
Jiri Pirko1f637702020-01-25 12:17:07 +01001877static int __register_netdevice_notifier_net(struct net *net,
1878 struct notifier_block *nb,
1879 bool ignore_call_fail)
1880{
1881 int err;
1882
1883 err = raw_notifier_chain_register(&net->netdev_chain, nb);
1884 if (err)
1885 return err;
1886 if (dev_boot_phase)
1887 return 0;
1888
1889 err = call_netdevice_register_net_notifiers(nb, net);
1890 if (err && !ignore_call_fail)
1891 goto chain_unregister;
1892
1893 return 0;
1894
1895chain_unregister:
1896 raw_notifier_chain_unregister(&net->netdev_chain, nb);
1897 return err;
1898}
1899
1900static int __unregister_netdevice_notifier_net(struct net *net,
1901 struct notifier_block *nb)
1902{
1903 int err;
1904
1905 err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1906 if (err)
1907 return err;
1908
1909 call_netdevice_unregister_net_notifiers(nb, net);
1910 return 0;
1911}
1912
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913/**
Jiri Pirkoa30c7b42019-09-30 10:15:10 +02001914 * register_netdevice_notifier_net - register a per-netns network notifier block
1915 * @net: network namespace
1916 * @nb: notifier
1917 *
1918 * Register a notifier to be called when network device events occur.
1919 * The notifier passed is linked into the kernel structures and must
1920 * not be reused until it has been unregistered. A negative errno code
1921 * is returned on a failure.
1922 *
1923 * When registered all registration and up events are replayed
 1924 * to the new notifier to allow it to have a race free
1925 * view of the network device list.
1926 */
1927
1928int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1929{
1930 int err;
1931
1932 rtnl_lock();
Jiri Pirko1f637702020-01-25 12:17:07 +01001933 err = __register_netdevice_notifier_net(net, nb, false);
Jiri Pirkoa30c7b42019-09-30 10:15:10 +02001934 rtnl_unlock();
1935 return err;
Jiri Pirkoa30c7b42019-09-30 10:15:10 +02001936}
1937EXPORT_SYMBOL(register_netdevice_notifier_net);
1938
1939/**
1940 * unregister_netdevice_notifier_net - unregister a per-netns
1941 * network notifier block
1942 * @net: network namespace
1943 * @nb: notifier
1944 *
1945 * Unregister a notifier previously registered by
 1946 * register_netdevice_notifier_net(). The notifier is unlinked from the
1947 * kernel structures and may then be reused. A negative errno code
1948 * is returned on a failure.
1949 *
1950 * After unregistering unregister and down device events are synthesized
1951 * for all devices on the device list to the removed notifier to remove
1952 * the need for special case cleanup code.
1953 */
1954
1955int unregister_netdevice_notifier_net(struct net *net,
1956 struct notifier_block *nb)
1957{
1958 int err;
1959
1960 rtnl_lock();
Jiri Pirko1f637702020-01-25 12:17:07 +01001961 err = __unregister_netdevice_notifier_net(net, nb);
Jiri Pirkoa30c7b42019-09-30 10:15:10 +02001962 rtnl_unlock();
1963 return err;
1964}
1965EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1966
Jiri Pirko93642e12020-01-25 12:17:08 +01001967int register_netdevice_notifier_dev_net(struct net_device *dev,
1968 struct notifier_block *nb,
1969 struct netdev_net_notifier *nn)
1970{
1971 int err;
1972
1973 rtnl_lock();
1974 err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1975 if (!err) {
1976 nn->nb = nb;
1977 list_add(&nn->list, &dev->net_notifier_list);
1978 }
1979 rtnl_unlock();
1980 return err;
1981}
1982EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1983
1984int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1985 struct notifier_block *nb,
1986 struct netdev_net_notifier *nn)
1987{
1988 int err;
1989
1990 rtnl_lock();
1991 list_del(&nn->list);
1992 err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1993 rtnl_unlock();
1994 return err;
1995}
1996EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
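/*
 * Illustrative sketch (not part of the kernel source): a driver that follows
 * its device across namespace moves embeds a struct netdev_net_notifier so
 * the core can re-register the block when the device changes netns. The
 * private structure and variable names are hypothetical.
 *
 *	struct mydrv_priv {
 *		struct netdev_net_notifier nn;
 *		...
 *	};
 *
 *	err = register_netdevice_notifier_dev_net(dev, &my_nb, &priv->nn);
 *	...
 *	unregister_netdevice_notifier_dev_net(dev, &my_nb, &priv->nn);
 */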
1997
1998static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1999 struct net *net)
2000{
2001 struct netdev_net_notifier *nn;
2002
2003 list_for_each_entry(nn, &dev->net_notifier_list, list) {
2004 __unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
2005 __register_netdevice_notifier_net(net, nn->nb, true);
2006 }
2007}
2008
Jiri Pirkoa30c7b42019-09-30 10:15:10 +02002009/**
Jiri Pirko351638e2013-05-28 01:30:21 +00002010 * call_netdevice_notifiers_info - call all network notifier blocks
2011 * @val: value passed unmodified to notifier function
Jiri Pirko351638e2013-05-28 01:30:21 +00002012 * @info: notifier information data
2013 *
2014 * Call all network notifier blocks. Parameters and return value
2015 * are as for raw_notifier_call_chain().
2016 */
2017
stephen hemminger1d143d92013-12-29 14:01:29 -08002018static int call_netdevice_notifiers_info(unsigned long val,
stephen hemminger1d143d92013-12-29 14:01:29 -08002019 struct netdev_notifier_info *info)
Jiri Pirko351638e2013-05-28 01:30:21 +00002020{
Jiri Pirkoa30c7b42019-09-30 10:15:10 +02002021 struct net *net = dev_net(info->dev);
2022 int ret;
2023
Jiri Pirko351638e2013-05-28 01:30:21 +00002024 ASSERT_RTNL();
Jiri Pirkoa30c7b42019-09-30 10:15:10 +02002025
2026 /* Run per-netns notifier block chain first, then run the global one.
2027 * Hopefully, one day, the global one is going to be removed after
2028 * all notifier block registrators get converted to be per-netns.
2029 */
2030 ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
2031 if (ret & NOTIFY_STOP_MASK)
2032 return ret;
Jiri Pirko351638e2013-05-28 01:30:21 +00002033 return raw_notifier_call_chain(&netdev_chain, val, info);
2034}
Jiri Pirko351638e2013-05-28 01:30:21 +00002035
Petr Machata26372602018-12-06 17:05:45 +00002036static int call_netdevice_notifiers_extack(unsigned long val,
2037 struct net_device *dev,
2038 struct netlink_ext_ack *extack)
2039{
2040 struct netdev_notifier_info info = {
2041 .dev = dev,
2042 .extack = extack,
2043 };
2044
2045 return call_netdevice_notifiers_info(val, &info);
2046}
2047
Jiri Pirko351638e2013-05-28 01:30:21 +00002048/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 * call_netdevice_notifiers - call all network notifier blocks
2050 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002051 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 *
2053 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07002054 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 */
2056
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07002057int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058{
Petr Machata26372602018-12-06 17:05:45 +00002059 return call_netdevice_notifiers_extack(val, dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060}
stephen hemmingeredf947f2011-03-24 13:24:01 +00002061EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
Sabrina Dubrocaaf7d6cc2018-10-09 17:48:14 +02002063/**
2064 * call_netdevice_notifiers_mtu - call all network notifier blocks
2065 * @val: value passed unmodified to notifier function
2066 * @dev: net_device pointer passed unmodified to notifier function
2067 * @arg: additional u32 argument passed to the notifier function
2068 *
2069 * Call all network notifier blocks. Parameters and return value
2070 * are as for raw_notifier_call_chain().
2071 */
2072static int call_netdevice_notifiers_mtu(unsigned long val,
2073 struct net_device *dev, u32 arg)
2074{
2075 struct netdev_notifier_info_ext info = {
2076 .info.dev = dev,
2077 .ext.mtu = arg,
2078 };
2079
2080 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2081
2082 return call_netdevice_notifiers_info(val, &info.info);
2083}
2084
Pablo Neira1cf519002015-05-13 18:19:37 +02002085#ifdef CONFIG_NET_INGRESS
Davidlohr Buesoaabf6772018-05-08 09:07:00 -07002086static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
Daniel Borkmann45771392015-04-10 23:07:54 +02002087
2088void net_inc_ingress_queue(void)
2089{
Davidlohr Buesoaabf6772018-05-08 09:07:00 -07002090 static_branch_inc(&ingress_needed_key);
Daniel Borkmann45771392015-04-10 23:07:54 +02002091}
2092EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2093
2094void net_dec_ingress_queue(void)
2095{
Davidlohr Buesoaabf6772018-05-08 09:07:00 -07002096 static_branch_dec(&ingress_needed_key);
Daniel Borkmann45771392015-04-10 23:07:54 +02002097}
2098EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2099#endif
2100
Daniel Borkmann1f211a12016-01-07 22:29:47 +01002101#ifdef CONFIG_NET_EGRESS
Davidlohr Buesoaabf6772018-05-08 09:07:00 -07002102static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
Daniel Borkmann1f211a12016-01-07 22:29:47 +01002103
2104void net_inc_egress_queue(void)
2105{
Davidlohr Buesoaabf6772018-05-08 09:07:00 -07002106 static_branch_inc(&egress_needed_key);
Daniel Borkmann1f211a12016-01-07 22:29:47 +01002107}
2108EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2109
2110void net_dec_egress_queue(void)
2111{
Davidlohr Buesoaabf6772018-05-08 09:07:00 -07002112 static_branch_dec(&egress_needed_key);
Daniel Borkmann1f211a12016-01-07 22:29:47 +01002113}
2114EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2115#endif
2116
Davidlohr Bueso39e83922018-05-08 09:07:01 -07002117static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
Masahiro Yamadae9666d12018-12-31 00:14:15 +09002118#ifdef CONFIG_JUMP_LABEL
Eric Dumazetb90e5792011-11-28 11:16:50 +00002119static atomic_t netstamp_needed_deferred;
Eric Dumazet13baa002017-03-01 14:28:39 -08002120static atomic_t netstamp_wanted;
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08002121static void netstamp_clear(struct work_struct *work)
2122{
2123 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
Eric Dumazet13baa002017-03-01 14:28:39 -08002124 int wanted;
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08002125
Eric Dumazet13baa002017-03-01 14:28:39 -08002126 wanted = atomic_add_return(deferred, &netstamp_wanted);
2127 if (wanted > 0)
Davidlohr Bueso39e83922018-05-08 09:07:01 -07002128 static_branch_enable(&netstamp_needed_key);
Eric Dumazet13baa002017-03-01 14:28:39 -08002129 else
Davidlohr Bueso39e83922018-05-08 09:07:01 -07002130 static_branch_disable(&netstamp_needed_key);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08002131}
2132static DECLARE_WORK(netstamp_work, netstamp_clear);
Eric Dumazetb90e5792011-11-28 11:16:50 +00002133#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134
2135void net_enable_timestamp(void)
2136{
Masahiro Yamadae9666d12018-12-31 00:14:15 +09002137#ifdef CONFIG_JUMP_LABEL
Eric Dumazet13baa002017-03-01 14:28:39 -08002138 int wanted;
2139
2140 while (1) {
2141 wanted = atomic_read(&netstamp_wanted);
2142 if (wanted <= 0)
2143 break;
2144 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2145 return;
2146 }
2147 atomic_inc(&netstamp_needed_deferred);
2148 schedule_work(&netstamp_work);
2149#else
Davidlohr Bueso39e83922018-05-08 09:07:01 -07002150 static_branch_inc(&netstamp_needed_key);
Eric Dumazet13baa002017-03-01 14:28:39 -08002151#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002153EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154
2155void net_disable_timestamp(void)
2156{
Masahiro Yamadae9666d12018-12-31 00:14:15 +09002157#ifdef CONFIG_JUMP_LABEL
Eric Dumazet13baa002017-03-01 14:28:39 -08002158 int wanted;
2159
2160 while (1) {
2161 wanted = atomic_read(&netstamp_wanted);
2162 if (wanted <= 1)
2163 break;
2164 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2165 return;
2166 }
2167 atomic_dec(&netstamp_needed_deferred);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08002168 schedule_work(&netstamp_work);
2169#else
Davidlohr Bueso39e83922018-05-08 09:07:01 -07002170 static_branch_dec(&netstamp_needed_key);
Eric Dumazet5fa8bbd2017-02-02 10:31:35 -08002171#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002173EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
Eric Dumazet3b098e22010-05-15 23:57:10 -07002175static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176{
Thomas Gleixner2456e852016-12-25 11:38:40 +01002177 skb->tstamp = 0;
Davidlohr Bueso39e83922018-05-08 09:07:01 -07002178 if (static_branch_unlikely(&netstamp_needed_key))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002179 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180}
2181
Davidlohr Bueso39e83922018-05-08 09:07:01 -07002182#define net_timestamp_check(COND, SKB) \
2183 if (static_branch_unlikely(&netstamp_needed_key)) { \
2184 if ((COND) && !(SKB)->tstamp) \
2185 __net_timestamp(SKB); \
2186 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07002187
Nikolay Aleksandrovf4b05d22016-04-28 17:59:28 +02002188bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
Daniel Lezcano79b569f2011-03-30 02:42:17 -07002189{
2190 unsigned int len;
2191
2192 if (!(dev->flags & IFF_UP))
2193 return false;
2194
2195 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
2196 if (skb->len <= len)
2197 return true;
2198
2199 /* if TSO is enabled, we don't care about the length as the packet
 2200 * could be forwarded without being segmented first
2201 */
2202 if (skb_is_gso(skb))
2203 return true;
2204
2205 return false;
2206}
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04002207EXPORT_SYMBOL_GPL(is_skb_forwardable);
Daniel Lezcano79b569f2011-03-30 02:42:17 -07002208
Herbert Xua0265d22014-04-17 13:45:03 +08002209int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2210{
Martin KaFai Lau4e3264d2016-11-09 15:36:33 -08002211 int ret = ____dev_forward_skb(dev, skb);
2212
2213 if (likely(!ret)) {
2214 skb->protocol = eth_type_trans(skb, dev);
2215 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
Herbert Xua0265d22014-04-17 13:45:03 +08002216 }
2217
Martin KaFai Lau4e3264d2016-11-09 15:36:33 -08002218 return ret;
Herbert Xua0265d22014-04-17 13:45:03 +08002219}
2220EXPORT_SYMBOL_GPL(__dev_forward_skb);
2221
Arnd Bergmann44540962009-11-26 06:07:08 +00002222/**
2223 * dev_forward_skb - loopback an skb to another netif
2224 *
2225 * @dev: destination network device
2226 * @skb: buffer to forward
2227 *
2228 * return values:
2229 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07002230 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00002231 *
2232 * dev_forward_skb can be used for injecting an skb from the
2233 * start_xmit function of one device into the receive queue
2234 * of another device.
2235 *
2236 * The receiving device may be in another namespace, so
2237 * we have to clear all information in the skb that could
2238 * impact namespace isolation.
2239 */
2240int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2241{
Herbert Xua0265d22014-04-17 13:45:03 +08002242 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00002243}
2244EXPORT_SYMBOL_GPL(dev_forward_skb);
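/*
 * Illustrative sketch (not part of the kernel source): a paired, veth-style
 * driver can hand a transmitted skb to its peer's receive path. The peer
 * lookup helper is hypothetical.
 *
 *	static netdev_tx_t mypair_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = mypair_get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);
 *		return NETDEV_TX_OK;
 *	}
 */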
2245
Changli Gao71d9dec2010-12-15 19:57:25 +00002246static inline int deliver_skb(struct sk_buff *skb,
2247 struct packet_type *pt_prev,
2248 struct net_device *orig_dev)
2249{
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04002250 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00002251 return -ENOMEM;
Reshetova, Elena63354792017-06-30 13:07:58 +03002252 refcount_inc(&skb->users);
Changli Gao71d9dec2010-12-15 19:57:25 +00002253 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2254}
2255
Salam Noureddine7866a622015-01-27 11:35:48 -08002256static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2257 struct packet_type **pt,
Jiri Pirkofbcb2172015-03-30 16:56:01 +02002258 struct net_device *orig_dev,
2259 __be16 type,
Salam Noureddine7866a622015-01-27 11:35:48 -08002260 struct list_head *ptype_list)
2261{
2262 struct packet_type *ptype, *pt_prev = *pt;
2263
2264 list_for_each_entry_rcu(ptype, ptype_list, list) {
2265 if (ptype->type != type)
2266 continue;
2267 if (pt_prev)
Jiri Pirkofbcb2172015-03-30 16:56:01 +02002268 deliver_skb(skb, pt_prev, orig_dev);
Salam Noureddine7866a622015-01-27 11:35:48 -08002269 pt_prev = ptype;
2270 }
2271 *pt = pt_prev;
2272}
2273
Eric Leblondc0de08d2012-08-16 22:02:58 +00002274static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2275{
Eric Leblonda3d744e2012-11-06 02:10:10 +00002276 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00002277 return false;
2278
2279 if (ptype->id_match)
2280 return ptype->id_match(ptype, skb->sk);
2281 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2282 return true;
2283
2284 return false;
2285}
2286
Maciej W. Rozycki9f9a7422018-10-09 23:57:49 +01002287/**
2288 * dev_nit_active - return true if any network interface taps are in use
2289 *
2290 * @dev: network device to check for the presence of taps
2291 */
2292bool dev_nit_active(struct net_device *dev)
2293{
2294 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2295}
2296EXPORT_SYMBOL_GPL(dev_nit_active);
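/*
 * Illustrative sketch (not part of the kernel source): a transmit path can
 * check dev_nit_active() so the clone done by dev_queue_xmit_nit() is only
 * paid for when a tap (e.g. an AF_PACKET socket) is actually listening.
 *
 *	if (dev_nit_active(dev))
 *		dev_queue_xmit_nit(skb, dev);
 */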
2297
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298/*
2299 * Support routine. Sends outgoing frames to any network
2300 * taps currently in use.
2301 */
2302
David Ahern74b20582016-05-10 11:19:50 -07002303void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304{
2305 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00002306 struct sk_buff *skb2 = NULL;
2307 struct packet_type *pt_prev = NULL;
Salam Noureddine7866a622015-01-27 11:35:48 -08002308 struct list_head *ptype_list = &ptype_all;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002309
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 rcu_read_lock();
Salam Noureddine7866a622015-01-27 11:35:48 -08002311again:
2312 list_for_each_entry_rcu(ptype, ptype_list, list) {
Vincent Whitchurchfa788d92018-09-03 16:23:36 +02002313 if (ptype->ignore_outgoing)
2314 continue;
2315
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 /* Never send packets back to the socket
2317 * they originated from - MvS (miquels@drinkel.ow.org)
2318 */
Salam Noureddine7866a622015-01-27 11:35:48 -08002319 if (skb_loop_sk(ptype, skb))
2320 continue;
Changli Gao71d9dec2010-12-15 19:57:25 +00002321
Salam Noureddine7866a622015-01-27 11:35:48 -08002322 if (pt_prev) {
2323 deliver_skb(skb2, pt_prev, skb->dev);
Changli Gao71d9dec2010-12-15 19:57:25 +00002324 pt_prev = ptype;
Salam Noureddine7866a622015-01-27 11:35:48 -08002325 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 }
Salam Noureddine7866a622015-01-27 11:35:48 -08002327
2328 /* need to clone skb, done only once */
2329 skb2 = skb_clone(skb, GFP_ATOMIC);
2330 if (!skb2)
2331 goto out_unlock;
2332
2333 net_timestamp_set(skb2);
2334
2335 /* skb->nh should be correctly
 2336 * set by the sender, so that the second statement is
2337 * just protection against buggy protocols.
2338 */
2339 skb_reset_mac_header(skb2);
2340
2341 if (skb_network_header(skb2) < skb2->data ||
2342 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2343 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2344 ntohs(skb2->protocol),
2345 dev->name);
2346 skb_reset_network_header(skb2);
2347 }
2348
2349 skb2->transport_header = skb2->network_header;
2350 skb2->pkt_type = PACKET_OUTGOING;
2351 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 }
Salam Noureddine7866a622015-01-27 11:35:48 -08002353
2354 if (ptype_list == &ptype_all) {
2355 ptype_list = &dev->ptype_all;
2356 goto again;
2357 }
2358out_unlock:
Willem de Bruijn581fe0e2017-09-22 19:42:37 -04002359 if (pt_prev) {
2360 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2361 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2362 else
2363 kfree_skb(skb2);
2364 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 rcu_read_unlock();
2366}
David Ahern74b20582016-05-10 11:19:50 -07002367EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
Ben Hutchings2c530402012-07-10 10:55:09 +00002369/**
2370 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00002371 * @dev: Network device
2372 * @txq: number of queues available
2373 *
 2374 * If real_num_tx_queues is changed the tc mappings may no longer be
 2375 * valid. To resolve this verify the tc mapping remains valid and if
 2376 * not, NULL the mapping. With no priorities mapping to this
 2377 * offset/count pair it will no longer be used. In the worst case, if
 2378 * TC0 is invalid nothing can be done, so disable priority mappings. It is
 2379 * expected that drivers will fix this mapping if they can before
 2380 * calling netif_set_real_num_tx_queues.
2381 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00002382static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00002383{
2384 int i;
2385 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2386
2387 /* If TC0 is invalidated disable TC mapping */
2388 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002389 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00002390 dev->num_tc = 0;
2391 return;
2392 }
2393
2394 /* Invalidated prio to tc mappings set to TC0 */
2395 for (i = 1; i < TC_BITMASK + 1; i++) {
2396 int q = netdev_get_prio_tc_map(dev, i);
2397
2398 tc = &dev->tc_to_txq[q];
2399 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002400 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2401 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00002402 netdev_set_prio_tc_map(dev, i, 0);
2403 }
2404 }
2405}
2406
Alexander Duyck8d059b02016-10-28 11:43:49 -04002407int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2408{
2409 if (dev->num_tc) {
2410 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2411 int i;
2412
Alexander Duyckffcfe252018-07-09 12:19:38 -04002413 /* walk through the TCs and see if it falls into any of them */
Alexander Duyck8d059b02016-10-28 11:43:49 -04002414 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2415 if ((txq - tc->offset) < tc->count)
2416 return i;
2417 }
2418
Alexander Duyckffcfe252018-07-09 12:19:38 -04002419 /* didn't find it, just return -1 to indicate no match */
Alexander Duyck8d059b02016-10-28 11:43:49 -04002420 return -1;
2421 }
2422
2423 return 0;
2424}
Henrik Austad8a5f2162017-10-17 12:10:10 +02002425EXPORT_SYMBOL(netdev_txq_to_tc);
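/*
 * Illustrative sketch (not part of the kernel source): a multi-TC driver can
 * map a queue index back to its traffic class, e.g. when programming
 * per-class hardware rate limits. The variable names are hypothetical.
 *
 *	int tc = netdev_txq_to_tc(dev, txq_index);
 *
 *	if (tc < 0)
 *		return -EINVAL;
 */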
Alexander Duyck8d059b02016-10-28 11:43:49 -04002426
Alexander Duyck537c00d2013-01-10 08:57:02 +00002427#ifdef CONFIG_XPS
Amritha Nambiar04157462018-06-29 21:26:46 -07002428struct static_key xps_needed __read_mostly;
2429EXPORT_SYMBOL(xps_needed);
2430struct static_key xps_rxqs_needed __read_mostly;
2431EXPORT_SYMBOL(xps_rxqs_needed);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002432static DEFINE_MUTEX(xps_map_mutex);
2433#define xmap_dereference(P) \
2434 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2435
Alexander Duyck6234f872016-10-28 11:46:49 -04002436static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2437 int tci, u16 index)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002438{
2439 struct xps_map *map = NULL;
2440 int pos;
2441
2442 if (dev_maps)
Amritha Nambiar80d19662018-06-29 21:26:41 -07002443 map = xmap_dereference(dev_maps->attr_map[tci]);
Alexander Duyck6234f872016-10-28 11:46:49 -04002444 if (!map)
2445 return false;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002446
Alexander Duyck6234f872016-10-28 11:46:49 -04002447 for (pos = map->len; pos--;) {
2448 if (map->queues[pos] != index)
2449 continue;
2450
2451 if (map->len > 1) {
2452 map->queues[pos] = map->queues[--map->len];
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002453 break;
2454 }
Alexander Duyck6234f872016-10-28 11:46:49 -04002455
Amritha Nambiar80d19662018-06-29 21:26:41 -07002456 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
Alexander Duyck6234f872016-10-28 11:46:49 -04002457 kfree_rcu(map, rcu);
2458 return false;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002459 }
2460
Alexander Duyck6234f872016-10-28 11:46:49 -04002461 return true;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00002462}
2463
Alexander Duyck6234f872016-10-28 11:46:49 -04002464static bool remove_xps_queue_cpu(struct net_device *dev,
2465 struct xps_dev_maps *dev_maps,
2466 int cpu, u16 offset, u16 count)
2467{
Alexander Duyck184c4492016-10-28 11:50:13 -04002468 int num_tc = dev->num_tc ? : 1;
2469 bool active = false;
2470 int tci;
Alexander Duyck6234f872016-10-28 11:46:49 -04002471
Alexander Duyck184c4492016-10-28 11:50:13 -04002472 for (tci = cpu * num_tc; num_tc--; tci++) {
2473 int i, j;
2474
2475 for (i = count, j = offset; i--; j++) {
Amritha Nambiar6358d492018-05-17 14:50:44 -07002476 if (!remove_xps_queue(dev_maps, tci, j))
Alexander Duyck184c4492016-10-28 11:50:13 -04002477 break;
2478 }
2479
2480 active |= i < 0;
Alexander Duyck6234f872016-10-28 11:46:49 -04002481 }
2482
Alexander Duyck184c4492016-10-28 11:50:13 -04002483 return active;
Alexander Duyck6234f872016-10-28 11:46:49 -04002484}
2485
Sabrina Dubroca867d0ad2018-11-29 14:14:49 +01002486static void reset_xps_maps(struct net_device *dev,
2487 struct xps_dev_maps *dev_maps,
2488 bool is_rxqs_map)
2489{
2490 if (is_rxqs_map) {
2491 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2492 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2493 } else {
2494 RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2495 }
2496 static_key_slow_dec_cpuslocked(&xps_needed);
2497 kfree_rcu(dev_maps, rcu);
2498}
2499
Amritha Nambiar80d19662018-06-29 21:26:41 -07002500static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2501 struct xps_dev_maps *dev_maps, unsigned int nr_ids,
2502 u16 offset, u16 count, bool is_rxqs_map)
2503{
2504 bool active = false;
2505 int i, j;
2506
2507 for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
2508 j < nr_ids;)
2509 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
2510 count);
Sabrina Dubroca867d0ad2018-11-29 14:14:49 +01002511 if (!active)
2512 reset_xps_maps(dev, dev_maps, is_rxqs_map);
Amritha Nambiar80d19662018-06-29 21:26:41 -07002513
Sabrina Dubrocaf28c0202018-11-29 14:14:48 +01002514 if (!is_rxqs_map) {
2515 for (i = offset + (count - 1); count--; i--) {
2516 netdev_queue_numa_node_write(
2517 netdev_get_tx_queue(dev, i),
2518 NUMA_NO_NODE);
Amritha Nambiar80d19662018-06-29 21:26:41 -07002519 }
Amritha Nambiar80d19662018-06-29 21:26:41 -07002520 }
2521}
2522
Alexander Duyck6234f872016-10-28 11:46:49 -04002523static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2524 u16 count)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002525{
Amritha Nambiar80d19662018-06-29 21:26:41 -07002526 const unsigned long *possible_mask = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002527 struct xps_dev_maps *dev_maps;
Amritha Nambiar80d19662018-06-29 21:26:41 -07002528 unsigned int nr_ids;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002529
Amritha Nambiar04157462018-06-29 21:26:46 -07002530 if (!static_key_false(&xps_needed))
2531 return;
2532
Andrei Vagin4d99f662018-08-08 20:07:35 -07002533 cpus_read_lock();
Alexander Duyck537c00d2013-01-10 08:57:02 +00002534 mutex_lock(&xps_map_mutex);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002535
Amritha Nambiar04157462018-06-29 21:26:46 -07002536 if (static_key_false(&xps_rxqs_needed)) {
2537 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2538 if (dev_maps) {
2539 nr_ids = dev->num_rx_queues;
2540 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
2541 offset, count, true);
2542 }
Amritha Nambiar80d19662018-06-29 21:26:41 -07002543 }
2544
2545 dev_maps = xmap_dereference(dev->xps_cpus_map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002546 if (!dev_maps)
2547 goto out_no_maps;
2548
Amritha Nambiar80d19662018-06-29 21:26:41 -07002549 if (num_possible_cpus() > 1)
2550 possible_mask = cpumask_bits(cpu_possible_mask);
2551 nr_ids = nr_cpu_ids;
2552 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
2553 false);
Alexander Duyck024e9672013-01-10 08:57:46 +00002554
Alexander Duyck537c00d2013-01-10 08:57:02 +00002555out_no_maps:
2556 mutex_unlock(&xps_map_mutex);
Andrei Vagin4d99f662018-08-08 20:07:35 -07002557 cpus_read_unlock();
Alexander Duyck537c00d2013-01-10 08:57:02 +00002558}
2559
Alexander Duyck6234f872016-10-28 11:46:49 -04002560static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2561{
2562 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2563}
2564
Amritha Nambiar80d19662018-06-29 21:26:41 -07002565static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2566 u16 index, bool is_rxqs_map)
Alexander Duyck01c5f862013-01-10 08:57:35 +00002567{
2568 struct xps_map *new_map;
2569 int alloc_len = XPS_MIN_MAP_ALLOC;
2570 int i, pos;
2571
2572 for (pos = 0; map && pos < map->len; pos++) {
2573 if (map->queues[pos] != index)
2574 continue;
2575 return map;
2576 }
2577
Amritha Nambiar80d19662018-06-29 21:26:41 -07002578 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
Alexander Duyck01c5f862013-01-10 08:57:35 +00002579 if (map) {
2580 if (pos < map->alloc_len)
2581 return map;
2582
2583 alloc_len = map->alloc_len * 2;
2584 }
2585
Amritha Nambiar80d19662018-06-29 21:26:41 -07002586 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2587 * map
2588 */
2589 if (is_rxqs_map)
2590 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2591 else
2592 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2593 cpu_to_node(attr_index));
Alexander Duyck01c5f862013-01-10 08:57:35 +00002594 if (!new_map)
2595 return NULL;
2596
2597 for (i = 0; i < pos; i++)
2598 new_map->queues[i] = map->queues[i];
2599 new_map->alloc_len = alloc_len;
2600 new_map->len = pos;
2601
2602 return new_map;
2603}
2604
Andrei Vagin4d99f662018-08-08 20:07:35 -07002605/* Must be called under cpus_read_lock */
Amritha Nambiar80d19662018-06-29 21:26:41 -07002606int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2607 u16 index, bool is_rxqs_map)
Alexander Duyck537c00d2013-01-10 08:57:02 +00002608{
Amritha Nambiar80d19662018-06-29 21:26:41 -07002609 const unsigned long *online_mask = NULL, *possible_mask = NULL;
Alexander Duyck01c5f862013-01-10 08:57:35 +00002610 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Amritha Nambiar80d19662018-06-29 21:26:41 -07002611 int i, j, tci, numa_node_id = -2;
Alexander Duyck184c4492016-10-28 11:50:13 -04002612 int maps_sz, num_tc = 1, tc = 0;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002613 struct xps_map *map, *new_map;
Alexander Duyck01c5f862013-01-10 08:57:35 +00002614 bool active = false;
Amritha Nambiar80d19662018-06-29 21:26:41 -07002615 unsigned int nr_ids;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002616
Alexander Duyck184c4492016-10-28 11:50:13 -04002617 if (dev->num_tc) {
Alexander Duyckffcfe252018-07-09 12:19:38 -04002618 /* Do not allow XPS on subordinate device directly */
Alexander Duyck184c4492016-10-28 11:50:13 -04002619 num_tc = dev->num_tc;
Alexander Duyckffcfe252018-07-09 12:19:38 -04002620 if (num_tc < 0)
2621 return -EINVAL;
2622
2623 /* If queue belongs to subordinate dev use its map */
2624 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2625
Alexander Duyck184c4492016-10-28 11:50:13 -04002626 tc = netdev_txq_to_tc(dev, index);
2627 if (tc < 0)
2628 return -EINVAL;
2629 }
2630
Amritha Nambiar80d19662018-06-29 21:26:41 -07002631 mutex_lock(&xps_map_mutex);
2632 if (is_rxqs_map) {
2633 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2634 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2635 nr_ids = dev->num_rx_queues;
2636 } else {
2637 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2638 if (num_possible_cpus() > 1) {
2639 online_mask = cpumask_bits(cpu_online_mask);
2640 possible_mask = cpumask_bits(cpu_possible_mask);
2641 }
2642 dev_maps = xmap_dereference(dev->xps_cpus_map);
2643 nr_ids = nr_cpu_ids;
2644 }
2645
Alexander Duyck184c4492016-10-28 11:50:13 -04002646 if (maps_sz < L1_CACHE_BYTES)
2647 maps_sz = L1_CACHE_BYTES;
2648
Alexander Duyck01c5f862013-01-10 08:57:35 +00002649 /* allocate memory for queue storage */
Amritha Nambiar80d19662018-06-29 21:26:41 -07002650 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2651 j < nr_ids;) {
Alexander Duyck01c5f862013-01-10 08:57:35 +00002652 if (!new_dev_maps)
2653 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002654 if (!new_dev_maps) {
2655 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002656 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00002657 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002658
Amritha Nambiar80d19662018-06-29 21:26:41 -07002659 tci = j * num_tc + tc;
2660 map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
Alexander Duyck01c5f862013-01-10 08:57:35 +00002661 NULL;
2662
Amritha Nambiar80d19662018-06-29 21:26:41 -07002663 map = expand_xps_map(map, j, index, is_rxqs_map);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002664 if (!map)
2665 goto error;
2666
Amritha Nambiar80d19662018-06-29 21:26:41 -07002667 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002668 }
2669
2670 if (!new_dev_maps)
2671 goto out_no_new_maps;
2672
Sabrina Dubroca867d0ad2018-11-29 14:14:49 +01002673 if (!dev_maps) {
2674 /* Increment static keys at most once per type */
2675 static_key_slow_inc_cpuslocked(&xps_needed);
2676 if (is_rxqs_map)
2677 static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2678 }
Amritha Nambiar04157462018-06-29 21:26:46 -07002679
Amritha Nambiar80d19662018-06-29 21:26:41 -07002680 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2681 j < nr_ids;) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002682 /* copy maps belonging to foreign traffic classes */
Amritha Nambiar80d19662018-06-29 21:26:41 -07002683 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
Alexander Duyck184c4492016-10-28 11:50:13 -04002684 /* fill in the new device map from the old device map */
Amritha Nambiar80d19662018-06-29 21:26:41 -07002685 map = xmap_dereference(dev_maps->attr_map[tci]);
2686 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
Alexander Duyck184c4492016-10-28 11:50:13 -04002687 }
2688
2689		/* We need to explicitly update tci as previous loop
2690 * could break out early if dev_maps is NULL.
2691 */
Amritha Nambiar80d19662018-06-29 21:26:41 -07002692 tci = j * num_tc + tc;
Alexander Duyck184c4492016-10-28 11:50:13 -04002693
Amritha Nambiar80d19662018-06-29 21:26:41 -07002694 if (netif_attr_test_mask(j, mask, nr_ids) &&
2695 netif_attr_test_online(j, online_mask, nr_ids)) {
2696 /* add tx-queue to CPU/rx-queue maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00002697 int pos = 0;
2698
Amritha Nambiar80d19662018-06-29 21:26:41 -07002699 map = xmap_dereference(new_dev_maps->attr_map[tci]);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002700 while ((pos < map->len) && (map->queues[pos] != index))
2701 pos++;
2702
2703 if (pos == map->len)
2704 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00002705#ifdef CONFIG_NUMA
Amritha Nambiar80d19662018-06-29 21:26:41 -07002706 if (!is_rxqs_map) {
2707 if (numa_node_id == -2)
2708 numa_node_id = cpu_to_node(j);
2709 else if (numa_node_id != cpu_to_node(j))
2710 numa_node_id = -1;
2711 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002712#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00002713 } else if (dev_maps) {
2714 /* fill in the new device map from the old device map */
Amritha Nambiar80d19662018-06-29 21:26:41 -07002715 map = xmap_dereference(dev_maps->attr_map[tci]);
2716 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00002717 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002718
Alexander Duyck184c4492016-10-28 11:50:13 -04002719 /* copy maps belonging to foreign traffic classes */
2720 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2721 /* fill in the new device map from the old device map */
Amritha Nambiar80d19662018-06-29 21:26:41 -07002722 map = xmap_dereference(dev_maps->attr_map[tci]);
2723 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
Alexander Duyck184c4492016-10-28 11:50:13 -04002724 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002725 }
2726
Amritha Nambiar80d19662018-06-29 21:26:41 -07002727 if (is_rxqs_map)
2728 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
2729 else
2730 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002731
Alexander Duyck537c00d2013-01-10 08:57:02 +00002732 /* Cleanup old maps */
Alexander Duyck184c4492016-10-28 11:50:13 -04002733 if (!dev_maps)
2734 goto out_no_old_maps;
2735
Amritha Nambiar80d19662018-06-29 21:26:41 -07002736 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2737 j < nr_ids;) {
2738 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2739 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2740 map = xmap_dereference(dev_maps->attr_map[tci]);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002741 if (map && map != new_map)
2742 kfree_rcu(map, rcu);
2743 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002744 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002745
Alexander Duyck184c4492016-10-28 11:50:13 -04002746 kfree_rcu(dev_maps, rcu);
2747
2748out_no_old_maps:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002749 dev_maps = new_dev_maps;
2750 active = true;
2751
2752out_no_new_maps:
Amritha Nambiar80d19662018-06-29 21:26:41 -07002753 if (!is_rxqs_map) {
2754 /* update Tx queue numa node */
2755 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2756 (numa_node_id >= 0) ?
2757 numa_node_id : NUMA_NO_NODE);
2758 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002759
Alexander Duyck01c5f862013-01-10 08:57:35 +00002760 if (!dev_maps)
2761 goto out_no_maps;
2762
Amritha Nambiar80d19662018-06-29 21:26:41 -07002763 /* removes tx-queue from unused CPUs/rx-queues */
2764 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2765 j < nr_ids;) {
2766 for (i = tc, tci = j * num_tc; i--; tci++)
Alexander Duyck184c4492016-10-28 11:50:13 -04002767 active |= remove_xps_queue(dev_maps, tci, index);
Amritha Nambiar80d19662018-06-29 21:26:41 -07002768 if (!netif_attr_test_mask(j, mask, nr_ids) ||
2769 !netif_attr_test_online(j, online_mask, nr_ids))
Alexander Duyck184c4492016-10-28 11:50:13 -04002770 active |= remove_xps_queue(dev_maps, tci, index);
2771 for (i = num_tc - tc, tci++; --i; tci++)
2772 active |= remove_xps_queue(dev_maps, tci, index);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002773 }
2774
2775 /* free map if not active */
Sabrina Dubroca867d0ad2018-11-29 14:14:49 +01002776 if (!active)
2777 reset_xps_maps(dev, dev_maps, is_rxqs_map);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002778
2779out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002780 mutex_unlock(&xps_map_mutex);
2781
2782 return 0;
2783error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002784 /* remove any maps that we added */
Amritha Nambiar80d19662018-06-29 21:26:41 -07002785 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2786 j < nr_ids;) {
2787 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2788 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
Alexander Duyck184c4492016-10-28 11:50:13 -04002789 map = dev_maps ?
Amritha Nambiar80d19662018-06-29 21:26:41 -07002790 xmap_dereference(dev_maps->attr_map[tci]) :
Alexander Duyck184c4492016-10-28 11:50:13 -04002791 NULL;
2792 if (new_map && new_map != map)
2793 kfree(new_map);
2794 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00002795 }
2796
Alexander Duyck537c00d2013-01-10 08:57:02 +00002797 mutex_unlock(&xps_map_mutex);
2798
Alexander Duyck537c00d2013-01-10 08:57:02 +00002799 kfree(new_dev_maps);
2800 return -ENOMEM;
2801}
Andrei Vagin4d99f662018-08-08 20:07:35 -07002802EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
Amritha Nambiar80d19662018-06-29 21:26:41 -07002803
2804int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2805 u16 index)
2806{
Andrei Vagin4d99f662018-08-08 20:07:35 -07002807 int ret;
2808
2809 cpus_read_lock();
2810 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
2811 cpus_read_unlock();
2812
2813 return ret;
Amritha Nambiar80d19662018-06-29 21:26:41 -07002814}
Alexander Duyck537c00d2013-01-10 08:57:02 +00002815EXPORT_SYMBOL(netif_set_xps_queue);
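/* Illustrative sketch, not part of this file: a multiqueue driver typically
 * pins each transmit queue to the CPU(s) servicing its completion interrupt,
 * so XPS steers locally generated traffic onto the matching queue. The
 * one-CPU-per-queue layout below is an assumption for the example only.
 *
 *	static void example_setup_xps(struct net_device *dev)
 *	{
 *		int qid;
 *
 *		for (qid = 0; qid < dev->real_num_tx_queues; qid++)
 *			netif_set_xps_queue(dev, cpumask_of(qid % nr_cpu_ids),
 *					    qid);
 *	}
 */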
2816
2817#endif
Alexander Duyckffcfe252018-07-09 12:19:38 -04002818static void netdev_unbind_all_sb_channels(struct net_device *dev)
2819{
2820 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2821
2822 /* Unbind any subordinate channels */
2823 while (txq-- != &dev->_tx[0]) {
2824 if (txq->sb_dev)
2825 netdev_unbind_sb_channel(dev, txq->sb_dev);
2826 }
2827}
2828
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002829void netdev_reset_tc(struct net_device *dev)
2830{
Alexander Duyck6234f872016-10-28 11:46:49 -04002831#ifdef CONFIG_XPS
2832 netif_reset_xps_queues_gt(dev, 0);
2833#endif
Alexander Duyckffcfe252018-07-09 12:19:38 -04002834 netdev_unbind_all_sb_channels(dev);
2835
2836 /* Reset TC configuration of device */
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002837 dev->num_tc = 0;
2838 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2839 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2840}
2841EXPORT_SYMBOL(netdev_reset_tc);
2842
2843int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2844{
2845 if (tc >= dev->num_tc)
2846 return -EINVAL;
2847
Alexander Duyck6234f872016-10-28 11:46:49 -04002848#ifdef CONFIG_XPS
2849 netif_reset_xps_queues(dev, offset, count);
2850#endif
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002851 dev->tc_to_txq[tc].count = count;
2852 dev->tc_to_txq[tc].offset = offset;
2853 return 0;
2854}
2855EXPORT_SYMBOL(netdev_set_tc_queue);
2856
2857int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2858{
2859 if (num_tc > TC_MAX_QUEUE)
2860 return -EINVAL;
2861
Alexander Duyck6234f872016-10-28 11:46:49 -04002862#ifdef CONFIG_XPS
2863 netif_reset_xps_queues_gt(dev, 0);
2864#endif
Alexander Duyckffcfe252018-07-09 12:19:38 -04002865 netdev_unbind_all_sb_channels(dev);
2866
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002867 dev->num_tc = num_tc;
2868 return 0;
2869}
2870EXPORT_SYMBOL(netdev_set_num_tc);
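/* Illustrative sketch, not part of this file: carving a device's transmit
 * queues into traffic classes, e.g. from an mqprio offload request. The queue
 * counts and offsets below are assumptions for the example.
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	queues 0-3 form TC 0
 *	netdev_set_tc_queue(dev, 1, 4, 4);	queues 4-7 form TC 1
 *
 * netdev_reset_tc(dev) undoes the mapping and clears any XPS state above.
 */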
2871
Alexander Duyckffcfe252018-07-09 12:19:38 -04002872void netdev_unbind_sb_channel(struct net_device *dev,
2873 struct net_device *sb_dev)
2874{
2875 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2876
2877#ifdef CONFIG_XPS
2878 netif_reset_xps_queues_gt(sb_dev, 0);
2879#endif
2880 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2881 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2882
2883 while (txq-- != &dev->_tx[0]) {
2884 if (txq->sb_dev == sb_dev)
2885 txq->sb_dev = NULL;
2886 }
2887}
2888EXPORT_SYMBOL(netdev_unbind_sb_channel);
2889
2890int netdev_bind_sb_channel_queue(struct net_device *dev,
2891 struct net_device *sb_dev,
2892 u8 tc, u16 count, u16 offset)
2893{
2894 /* Make certain the sb_dev and dev are already configured */
2895 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2896 return -EINVAL;
2897
2898 /* We cannot hand out queues we don't have */
2899 if ((offset + count) > dev->real_num_tx_queues)
2900 return -EINVAL;
2901
2902 /* Record the mapping */
2903 sb_dev->tc_to_txq[tc].count = count;
2904 sb_dev->tc_to_txq[tc].offset = offset;
2905
2906 /* Provide a way for Tx queue to find the tc_to_txq map or
2907 * XPS map for itself.
2908 */
2909 while (count--)
2910 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2911
2912 return 0;
2913}
2914EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2915
2916int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2917{
2918 /* Do not use a multiqueue device to represent a subordinate channel */
2919 if (netif_is_multiqueue(dev))
2920 return -ENODEV;
2921
2922 /* We allow channels 1 - 32767 to be used for subordinate channels.
2923 * Channel 0 is meant to be "native" mode and used only to represent
2924 * the main root device. We allow writing 0 to reset the device back
2925 * to normal mode after being used as a subordinate channel.
2926 */
2927 if (channel > S16_MAX)
2928 return -EINVAL;
2929
2930 dev->num_tc = -channel;
2931
2932 return 0;
2933}
2934EXPORT_SYMBOL(netdev_set_sb_channel);
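/* Illustrative sketch, not part of this file: how an upper device offloaded
 * as a subordinate channel (e.g. a macvlan hardware offload) gets wired to a
 * slice of the lower device's queues. Variable names are assumptions, and the
 * lower device must already have traffic classes configured, per the checks
 * in netdev_bind_sb_channel_queue() above.
 *
 *	netdev_set_sb_channel(macvlan_dev, chan);
 *	netdev_bind_sb_channel_queue(lower_dev, macvlan_dev, 0, qcount,
 *				     qcount * chan);
 *
 * Queue lookups then reach the subordinate map through txq->sb_dev.
 */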
2935
John Fastabendf0796d52010-07-01 13:21:57 +00002936/*
2937 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
Gal Pressman3a053b12018-02-28 15:59:15 +02002938 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
John Fastabendf0796d52010-07-01 13:21:57 +00002939 */
Tom Herberte6484932010-10-18 18:04:39 +00002940int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002941{
Jakub Kicinskiac5b7012018-02-12 21:35:31 -08002942 bool disabling;
Tom Herbert1d24eb42010-11-21 13:17:27 +00002943 int rc;
2944
Jakub Kicinskiac5b7012018-02-12 21:35:31 -08002945 disabling = txq < dev->real_num_tx_queues;
2946
Tom Herberte6484932010-10-18 18:04:39 +00002947 if (txq < 1 || txq > dev->num_tx_queues)
2948 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002949
Ben Hutchings5c565802011-02-15 19:39:21 +00002950 if (dev->reg_state == NETREG_REGISTERED ||
2951 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002952 ASSERT_RTNL();
2953
Tom Herbert1d24eb42010-11-21 13:17:27 +00002954 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2955 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002956 if (rc)
2957 return rc;
2958
John Fastabend4f57c082011-01-17 08:06:04 +00002959 if (dev->num_tc)
2960 netif_setup_tc(dev, txq);
2961
Jakub Kicinskiac5b7012018-02-12 21:35:31 -08002962 dev->real_num_tx_queues = txq;
2963
2964 if (disabling) {
2965 synchronize_net();
Tom Herberte6484932010-10-18 18:04:39 +00002966 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002967#ifdef CONFIG_XPS
2968 netif_reset_xps_queues_gt(dev, txq);
2969#endif
2970 }
Jakub Kicinskiac5b7012018-02-12 21:35:31 -08002971 } else {
2972 dev->real_num_tx_queues = txq;
John Fastabendf0796d52010-07-01 13:21:57 +00002973 }
Tom Herberte6484932010-10-18 18:04:39 +00002974
Tom Herberte6484932010-10-18 18:04:39 +00002975 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002976}
2977EXPORT_SYMBOL(netif_set_real_num_tx_queues);
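/* Illustrative sketch, not part of this file: resizing the active TX queue
 * set after the device is registered, e.g. in response to a channel-count
 * change. The caller is assumed to already hold rtnl_lock() at this point.
 *
 *	err = netif_set_real_num_tx_queues(dev, new_txq);
 *	if (err)
 *		return err;
 */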
Denis Vlasenko56079432006-03-29 15:57:29 -08002978
Michael Daltona953be52014-01-16 22:23:28 -08002979#ifdef CONFIG_SYSFS
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002980/**
2981 * netif_set_real_num_rx_queues - set actual number of RX queues used
2982 * @dev: Network device
2983 * @rxq: Actual number of RX queues
2984 *
2985 * This must be called either with the rtnl_lock held or before
2986 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002987 * negative error code. If called before registration, it always
2988 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002989 */
2990int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2991{
2992 int rc;
2993
Tom Herbertbd25fa72010-10-18 18:00:16 +00002994 if (rxq < 1 || rxq > dev->num_rx_queues)
2995 return -EINVAL;
2996
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002997 if (dev->reg_state == NETREG_REGISTERED) {
2998 ASSERT_RTNL();
2999
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003000 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
3001 rxq);
3002 if (rc)
3003 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003004 }
3005
3006 dev->real_num_rx_queues = rxq;
3007 return 0;
3008}
3009EXPORT_SYMBOL(netif_set_real_num_rx_queues);
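/* Illustrative sketch, not part of this file: the RX counterpart is usually
 * adjusted alongside the TX count in the same rtnl_lock() section.
 *
 *	err = netif_set_real_num_rx_queues(dev, new_rxq);
 *	if (err)
 *		return err;
 */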
3010#endif
3011
Ben Hutchings2c530402012-07-10 10:55:09 +00003012/**
3013 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00003014 *
3015 * This routine should set an upper limit on the number of RSS queues
3016 * used by default by multiqueue devices.
3017 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00003018int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00003019{
Hariprasad Shenai40e4e712016-06-08 18:09:08 +05303020 return is_kdump_kernel() ?
3021 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
Yuval Mintz16917b82012-07-01 03:18:50 +00003022}
3023EXPORT_SYMBOL(netif_get_num_default_rss_queues);
3024
Eric Dumazet3bcb8462016-06-04 20:02:28 -07003025static void __netif_reschedule(struct Qdisc *q)
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003026{
3027 struct softnet_data *sd;
3028 unsigned long flags;
3029
3030 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05003031 sd = this_cpu_ptr(&softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00003032 q->next_sched = NULL;
3033 *sd->output_queue_tailp = q;
3034 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003035 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3036 local_irq_restore(flags);
3037}
3038
David S. Miller37437bb2008-07-16 02:15:04 -07003039void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08003040{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003041 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3042 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08003043}
3044EXPORT_SYMBOL(__netif_schedule);
3045
Eric Dumazete6247022013-12-05 04:45:08 -08003046struct dev_kfree_skb_cb {
3047 enum skb_free_reason reason;
3048};
3049
3050static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08003051{
Eric Dumazete6247022013-12-05 04:45:08 -08003052 return (struct dev_kfree_skb_cb *)skb->cb;
Denis Vlasenko56079432006-03-29 15:57:29 -08003053}
Denis Vlasenko56079432006-03-29 15:57:29 -08003054
John Fastabend46e5da40a2014-09-12 20:04:52 -07003055void netif_schedule_queue(struct netdev_queue *txq)
3056{
3057 rcu_read_lock();
Julio Faracco5be55152019-10-01 11:39:04 -03003058 if (!netif_xmit_stopped(txq)) {
John Fastabend46e5da40a2014-09-12 20:04:52 -07003059 struct Qdisc *q = rcu_dereference(txq->qdisc);
3060
3061 __netif_schedule(q);
3062 }
3063 rcu_read_unlock();
3064}
3065EXPORT_SYMBOL(netif_schedule_queue);
3066
John Fastabend46e5da40a2014-09-12 20:04:52 -07003067void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3068{
3069 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3070 struct Qdisc *q;
3071
3072 rcu_read_lock();
3073 q = rcu_dereference(dev_queue->qdisc);
3074 __netif_schedule(q);
3075 rcu_read_unlock();
3076 }
3077}
3078EXPORT_SYMBOL(netif_tx_wake_queue);
3079
Eric Dumazete6247022013-12-05 04:45:08 -08003080void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
3081{
3082 unsigned long flags;
3083
Myungho Jung98998862017-04-25 11:58:15 -07003084 if (unlikely(!skb))
3085 return;
3086
Reshetova, Elena63354792017-06-30 13:07:58 +03003087 if (likely(refcount_read(&skb->users) == 1)) {
Eric Dumazete6247022013-12-05 04:45:08 -08003088 smp_rmb();
Reshetova, Elena63354792017-06-30 13:07:58 +03003089 refcount_set(&skb->users, 0);
3090 } else if (likely(!refcount_dec_and_test(&skb->users))) {
Eric Dumazete6247022013-12-05 04:45:08 -08003091 return;
3092 }
3093 get_kfree_skb_cb(skb)->reason = reason;
3094 local_irq_save(flags);
3095 skb->next = __this_cpu_read(softnet_data.completion_queue);
3096 __this_cpu_write(softnet_data.completion_queue, skb);
3097 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3098 local_irq_restore(flags);
3099}
3100EXPORT_SYMBOL(__dev_kfree_skb_irq);
3101
3102void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
Denis Vlasenko56079432006-03-29 15:57:29 -08003103{
3104 if (in_irq() || irqs_disabled())
Eric Dumazete6247022013-12-05 04:45:08 -08003105 __dev_kfree_skb_irq(skb, reason);
Denis Vlasenko56079432006-03-29 15:57:29 -08003106 else
3107 dev_kfree_skb(skb);
3108}
Eric Dumazete6247022013-12-05 04:45:08 -08003109EXPORT_SYMBOL(__dev_kfree_skb_any);
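/* Illustrative sketch, not part of this file: drivers normally use the
 * dev_kfree_skb_any()/dev_consume_skb_any() wrappers, which pass the
 * dropped/consumed reason to __dev_kfree_skb_any() and are safe in any
 * context, e.g. a TX completion handler. tx_failed is a stand-in for the
 * driver's own error condition.
 *
 *	if (unlikely(tx_failed))
 *		dev_kfree_skb_any(skb);
 *	else
 *		dev_consume_skb_any(skb);
 */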
Denis Vlasenko56079432006-03-29 15:57:29 -08003110
3111
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003112/**
3113 * netif_device_detach - mark device as removed
3114 * @dev: network device
3115 *
3116 * Mark device as removed from the system and therefore no longer available.
3117 */
Denis Vlasenko56079432006-03-29 15:57:29 -08003118void netif_device_detach(struct net_device *dev)
3119{
3120 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3121 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00003122 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08003123 }
3124}
3125EXPORT_SYMBOL(netif_device_detach);
3126
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003127/**
3128 * netif_device_attach - mark device as attached
3129 * @dev: network device
3130 *
3131 * Mark device as attached to the system and restart if needed.
3132 */
Denis Vlasenko56079432006-03-29 15:57:29 -08003133void netif_device_attach(struct net_device *dev)
3134{
3135 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3136 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00003137 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003138 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08003139 }
3140}
3141EXPORT_SYMBOL(netif_device_attach);
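/* Illustrative sketch, not part of this file: the usual detach/attach pairing
 * in a driver's power-management hooks. Function and structure names are
 * assumptions for the example.
 *
 *	static int example_suspend(struct example_priv *priv)
 *	{
 *		netif_device_detach(priv->netdev);
 *		example_stop_hw(priv);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct example_priv *priv)
 *	{
 *		example_start_hw(priv);
 *		netif_device_attach(priv->netdev);
 *		return 0;
 *	}
 */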
3142
Jiri Pirko5605c762015-05-12 14:56:12 +02003143/*
3144 * Returns a Tx hash based on the given packet descriptor and a Tx queue's number
3145 * to be used as a distribution range.
3146 */
Alexander Duyckeadec8772018-07-09 12:19:48 -04003147static u16 skb_tx_hash(const struct net_device *dev,
3148 const struct net_device *sb_dev,
3149 struct sk_buff *skb)
Jiri Pirko5605c762015-05-12 14:56:12 +02003150{
3151 u32 hash;
3152 u16 qoffset = 0;
Alexander Duyck1b837d42018-04-27 14:06:53 -04003153 u16 qcount = dev->real_num_tx_queues;
Jiri Pirko5605c762015-05-12 14:56:12 +02003154
Alexander Duyckeadec8772018-07-09 12:19:48 -04003155 if (dev->num_tc) {
3156 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3157
3158 qoffset = sb_dev->tc_to_txq[tc].offset;
3159 qcount = sb_dev->tc_to_txq[tc].count;
3160 }
3161
Jiri Pirko5605c762015-05-12 14:56:12 +02003162 if (skb_rx_queue_recorded(skb)) {
3163 hash = skb_get_rx_queue(skb);
Amritha Nambiar6e11d152020-02-24 10:56:00 -08003164 if (hash >= qoffset)
3165 hash -= qoffset;
Alexander Duyck1b837d42018-04-27 14:06:53 -04003166 while (unlikely(hash >= qcount))
3167 hash -= qcount;
Alexander Duyckeadec8772018-07-09 12:19:48 -04003168 return hash + qoffset;
Jiri Pirko5605c762015-05-12 14:56:12 +02003169 }
3170
3171 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3172}
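/* Worked example (assumed numbers): with tc_to_txq[tc] = { .count = 4,
 * .offset = 8 }, qcount is 4 and qoffset is 8, so a recorded RX queue of 10
 * maps straight to TX queue 10, while the flow-hash path spreads traffic over
 * queues 8-11, since reciprocal_scale(h, 4) = ((u64)h * 4) >> 32 yields 0-3.
 */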
Jiri Pirko5605c762015-05-12 14:56:12 +02003173
Ben Hutchings36c92472012-01-17 07:57:56 +00003174static void skb_warn_bad_offload(const struct sk_buff *skb)
3175{
Wei Tang84d15ae2016-06-16 21:17:49 +08003176 static const netdev_features_t null_features;
Ben Hutchings36c92472012-01-17 07:57:56 +00003177 struct net_device *dev = skb->dev;
Bjørn Mork88ad4172015-11-16 19:16:40 +01003178 const char *name = "";
Ben Hutchings36c92472012-01-17 07:57:56 +00003179
Ben Greearc846ad92013-04-19 10:45:52 +00003180 if (!net_ratelimit())
3181 return;
3182
Bjørn Mork88ad4172015-11-16 19:16:40 +01003183 if (dev) {
3184 if (dev->dev.parent)
3185 name = dev_driver_string(dev->dev.parent);
3186 else
3187 name = netdev_name(dev);
3188 }
Willem de Bruijn64131392019-07-07 05:51:55 -04003189 skb_dump(KERN_WARNING, skb, false);
3190 WARN(1, "%s: caps=(%pNF, %pNF)\n",
Bjørn Mork88ad4172015-11-16 19:16:40 +01003191 name, dev ? &dev->features : &null_features,
Willem de Bruijn64131392019-07-07 05:51:55 -04003192 skb->sk ? &skb->sk->sk_route_caps : &null_features);
Ben Hutchings36c92472012-01-17 07:57:56 +00003193}
3194
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195/*
3196 * Invalidate hardware checksum when packet is to be mangled, and
3197 * complete checksum manually on outgoing path.
3198 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07003199int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200{
Al Virod3bc23e2006-11-14 21:24:49 -08003201 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07003202 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203
Patrick McHardy84fa7932006-08-29 16:44:56 -07003204 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07003205 goto out_set_summed;
3206
3207 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00003208 skb_warn_bad_offload(skb);
3209 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210 }
3211
Eric Dumazetcef401d2013-01-25 20:34:37 +00003212 /* Before computing a checksum, we should make sure no frag could
3213 * be modified by an external entity : checksum could be wrong.
3214 */
3215 if (skb_has_shared_frag(skb)) {
3216 ret = __skb_linearize(skb);
3217 if (ret)
3218 goto out;
3219 }
3220
Michał Mirosław55508d62010-12-14 15:24:08 +00003221 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07003222 BUG_ON(offset >= skb_headlen(skb));
3223 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3224
3225 offset += skb->csum_offset;
3226 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3227
Heiner Kallweit8211fbf2019-10-06 18:52:43 +02003228 ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3229 if (ret)
3230 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231
Eric Dumazet4f2e4ad2016-10-29 11:02:36 -07003232 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
Herbert Xua430a432006-07-08 13:34:56 -07003233out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003235out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236 return ret;
3237}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003238EXPORT_SYMBOL(skb_checksum_help);
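/* Illustrative sketch, not part of this file: a transmit path that cannot
 * checksum a given packet in hardware falls back to this helper before
 * handing the skb to the NIC. example_hw_can_csum() is a stand-in for the
 * driver's own capability test.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */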
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239
Davide Carattib72b5bf2017-05-18 15:44:38 +02003240int skb_crc32c_csum_help(struct sk_buff *skb)
3241{
3242 __le32 crc32c_csum;
3243 int ret = 0, offset, start;
3244
3245 if (skb->ip_summed != CHECKSUM_PARTIAL)
3246 goto out;
3247
3248 if (unlikely(skb_is_gso(skb)))
3249 goto out;
3250
3251 /* Before computing a checksum, we should make sure no frag could
3252 * be modified by an external entity : checksum could be wrong.
3253 */
3254 if (unlikely(skb_has_shared_frag(skb))) {
3255 ret = __skb_linearize(skb);
3256 if (ret)
3257 goto out;
3258 }
3259 start = skb_checksum_start_offset(skb);
3260 offset = start + offsetof(struct sctphdr, checksum);
3261 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3262 ret = -EINVAL;
3263 goto out;
3264 }
Heiner Kallweit8211fbf2019-10-06 18:52:43 +02003265
3266 ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3267 if (ret)
3268 goto out;
3269
Davide Carattib72b5bf2017-05-18 15:44:38 +02003270 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3271 skb->len - start, ~(__u32)0,
3272 crc32c_csum_stub));
3273 *(__le32 *)(skb->data + offset) = crc32c_csum;
3274 skb->ip_summed = CHECKSUM_NONE;
Davide Carattidba00302017-05-18 15:44:40 +02003275 skb->csum_not_inet = 0;
Davide Carattib72b5bf2017-05-18 15:44:38 +02003276out:
3277 return ret;
3278}
3279
Vlad Yasevich53d64712014-03-27 17:26:18 -04003280__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
Pravin B Shelarec5f0612013-03-07 09:28:01 +00003281{
3282 __be16 type = skb->protocol;
3283
Pravin B Shelar19acc322013-05-07 20:41:07 +00003284 /* Tunnel gso handlers can set protocol to ethernet. */
3285 if (type == htons(ETH_P_TEB)) {
3286 struct ethhdr *eth;
3287
3288 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3289 return 0;
3290
Eric Dumazet1dfe82e2018-03-26 08:08:07 -07003291 eth = (struct ethhdr *)skb->data;
Pravin B Shelar19acc322013-05-07 20:41:07 +00003292 type = eth->h_proto;
3293 }
3294
Toshiaki Makitad4bcef32015-01-29 20:37:07 +09003295 return __vlan_get_protocol(skb, type, depth);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00003296}
3297
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00003298/**
3299 * skb_mac_gso_segment - mac layer segmentation handler.
3300 * @skb: buffer to segment
3301 * @features: features for the output path (see dev->features)
3302 */
3303struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3304 netdev_features_t features)
3305{
3306 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3307 struct packet_offload *ptype;
Vlad Yasevich53d64712014-03-27 17:26:18 -04003308 int vlan_depth = skb->mac_len;
3309 __be16 type = skb_network_protocol(skb, &vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00003310
Pravin B Shelarec5f0612013-03-07 09:28:01 +00003311 if (unlikely(!type))
3312 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00003313
Vlad Yasevich53d64712014-03-27 17:26:18 -04003314 __skb_pull(skb, vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00003315
3316 rcu_read_lock();
3317 list_for_each_entry_rcu(ptype, &offload_base, list) {
3318 if (ptype->type == type && ptype->callbacks.gso_segment) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00003319 segs = ptype->callbacks.gso_segment(skb, features);
3320 break;
3321 }
3322 }
3323 rcu_read_unlock();
3324
3325 __skb_push(skb, skb->data - skb_mac_header(skb));
3326
3327 return segs;
3328}
3329EXPORT_SYMBOL(skb_mac_gso_segment);
3330
3331
Cong Wang12b00042013-02-05 16:36:38 +00003332/* openvswitch calls this on rx path, so we need a different check.
3333 */
3334static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3335{
3336 if (tx_path)
Willem de Bruijn0c19f8462017-11-21 10:22:25 -05003337 return skb->ip_summed != CHECKSUM_PARTIAL &&
3338 skb->ip_summed != CHECKSUM_UNNECESSARY;
Eric Dumazet6e7bc472017-02-03 14:29:42 -08003339
3340 return skb->ip_summed == CHECKSUM_NONE;
Cong Wang12b00042013-02-05 16:36:38 +00003341}
3342
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003343/**
Cong Wang12b00042013-02-05 16:36:38 +00003344 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003345 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07003346 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00003347 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003348 *
3349 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07003350 *
3351 * It may return NULL if the skb requires no segmentation. This is
3352 * only possible when GSO is used for verifying header integrity.
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03003353 *
Cambda Zhua08e7fd2020-03-26 15:33:14 +08003354 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003355 */
Cong Wang12b00042013-02-05 16:36:38 +00003356struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3357 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003358{
Eric Dumazetb2504a52017-01-31 10:20:32 -08003359 struct sk_buff *segs;
3360
Cong Wang12b00042013-02-05 16:36:38 +00003361 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00003362 int err;
3363
Eric Dumazetb2504a52017-01-31 10:20:32 -08003364 /* We're going to init ->check field in TCP or UDP header */
françois romieua40e0a62014-07-15 23:55:35 +02003365 err = skb_cow_head(skb, 0);
3366 if (err < 0)
Herbert Xua430a432006-07-08 13:34:56 -07003367 return ERR_PTR(err);
3368 }
3369
Alexander Duyck802ab552016-04-10 21:45:03 -04003370 /* Only report GSO partial support if it will enable us to
3371 * support segmentation on this frame without needing additional
3372 * work.
3373 */
3374 if (features & NETIF_F_GSO_PARTIAL) {
3375 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3376 struct net_device *dev = skb->dev;
3377
3378 partial_features |= dev->features & dev->gso_partial_features;
3379 if (!skb_gso_ok(skb, features | partial_features))
3380 features &= ~NETIF_F_GSO_PARTIAL;
3381 }
3382
Cambda Zhua08e7fd2020-03-26 15:33:14 +08003383 BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
Konstantin Khlebnikov9207f9d2016-01-08 15:21:46 +03003384 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3385
Pravin B Shelar68c33162013-02-14 14:02:41 +00003386 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07003387 SKB_GSO_CB(skb)->encap_level = 0;
3388
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00003389 skb_reset_mac_header(skb);
3390 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003391
Eric Dumazetb2504a52017-01-31 10:20:32 -08003392 segs = skb_mac_gso_segment(skb, features);
3393
Steffen Klassert3a1296a2020-01-25 11:26:44 +01003394 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
Eric Dumazetb2504a52017-01-31 10:20:32 -08003395 skb_warn_bad_offload(skb);
3396
3397 return segs;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003398}
Cong Wang12b00042013-02-05 16:36:38 +00003399EXPORT_SYMBOL(__skb_gso_segment);
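/* Illustrative sketch, not part of this file: callers normally go through the
 * skb_gso_segment() wrapper (tx_path == true) and replace the original skb
 * with the returned list, much like validate_xmit_skb() further down.
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		skb = segs;
 *	}
 */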
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003400
Herbert Xufb286bb2005-11-10 13:01:24 -08003401/* Take action when hardware reception checksum errors are detected. */
3402#ifdef CONFIG_BUG
Cong Wang7fe50ac2018-11-12 14:47:18 -08003403void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
Herbert Xufb286bb2005-11-10 13:01:24 -08003404{
3405 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00003406 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Willem de Bruijn64131392019-07-07 05:51:55 -04003407 skb_dump(KERN_ERR, skb, true);
Herbert Xufb286bb2005-11-10 13:01:24 -08003408 dump_stack();
3409 }
3410}
3411EXPORT_SYMBOL(netdev_rx_csum_fault);
3412#endif
3413
Christoph Hellwigab74cfe2018-04-03 20:31:35 +02003414/* XXX: check that highmem exists at all on the given machine. */
Florian Westphalc1e756b2014-05-05 15:00:44 +02003415static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416{
Herbert Xu3d3a8532006-06-27 13:33:10 -07003417#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 int i;
tchardingf4563a72017-02-09 17:56:07 +11003419
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00003420 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003421 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3422 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tchardingf4563a72017-02-09 17:56:07 +11003423
Ian Campbellea2ab692011-08-22 23:44:58 +00003424 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00003425 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00003426 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00003427 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07003428#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429 return 0;
3430}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431
Simon Horman3b392dd2014-06-04 08:53:17 +09003432/* If MPLS offload request, verify we are testing hardware MPLS features
3433 * instead of standard features for the netdev.
3434 */
Pravin B Shelard0edc7b2014-12-23 16:20:11 -08003435#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
Simon Horman3b392dd2014-06-04 08:53:17 +09003436static netdev_features_t net_mpls_features(struct sk_buff *skb,
3437 netdev_features_t features,
3438 __be16 type)
3439{
Simon Horman25cd9ba2014-10-06 05:05:13 -07003440 if (eth_p_mpls(type))
Simon Horman3b392dd2014-06-04 08:53:17 +09003441 features &= skb->dev->mpls_features;
3442
3443 return features;
3444}
3445#else
3446static netdev_features_t net_mpls_features(struct sk_buff *skb,
3447 netdev_features_t features,
3448 __be16 type)
3449{
3450 return features;
3451}
3452#endif
3453
Michał Mirosławc8f44af2011-11-15 15:29:55 +00003454static netdev_features_t harmonize_features(struct sk_buff *skb,
Florian Westphalc1e756b2014-05-05 15:00:44 +02003455 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00003456{
Vlad Yasevich53d64712014-03-27 17:26:18 -04003457 int tmp;
Simon Horman3b392dd2014-06-04 08:53:17 +09003458 __be16 type;
3459
3460 type = skb_network_protocol(skb, &tmp);
3461 features = net_mpls_features(skb, features, type);
Vlad Yasevich53d64712014-03-27 17:26:18 -04003462
Ed Cashinc0d680e2012-09-19 15:49:00 +00003463 if (skb->ip_summed != CHECKSUM_NONE &&
Simon Horman3b392dd2014-06-04 08:53:17 +09003464 !can_checksum_protocol(features, type)) {
Alexander Duyck996e8022016-05-02 09:25:10 -07003465 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Jesse Grossf01a5232011-01-09 06:23:31 +00003466 }
Eric Dumazet7be2c822017-01-18 12:12:17 -08003467 if (illegal_highdma(skb->dev, skb))
3468 features &= ~NETIF_F_SG;
Jesse Grossf01a5232011-01-09 06:23:31 +00003469
3470 return features;
3471}
3472
Toshiaki Makitae38f3022015-03-27 14:31:13 +09003473netdev_features_t passthru_features_check(struct sk_buff *skb,
3474 struct net_device *dev,
3475 netdev_features_t features)
3476{
3477 return features;
3478}
3479EXPORT_SYMBOL(passthru_features_check);
3480
Toshiaki Makita7ce23672018-04-17 18:46:14 +09003481static netdev_features_t dflt_features_check(struct sk_buff *skb,
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09003482 struct net_device *dev,
3483 netdev_features_t features)
3484{
3485 return vlan_features_check(skb, features);
3486}
3487
Alexander Duyckcbc53e02016-04-10 21:44:51 -04003488static netdev_features_t gso_features_check(const struct sk_buff *skb,
3489 struct net_device *dev,
3490 netdev_features_t features)
3491{
3492 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3493
3494 if (gso_segs > dev->gso_max_segs)
3495 return features & ~NETIF_F_GSO_MASK;
3496
Alexander Duyck802ab552016-04-10 21:45:03 -04003497 /* Support for GSO partial features requires software
3498 * intervention before we can actually process the packets
3499 * so we need to strip support for any partial features now
3500 * and we can pull them back in after we have partially
3501 * segmented the frame.
3502 */
3503 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3504 features &= ~dev->gso_partial_features;
3505
3506 /* Make sure to clear the IPv4 ID mangling feature if the
3507 * IPv4 header has the potential to be fragmented.
Alexander Duyckcbc53e02016-04-10 21:44:51 -04003508 */
3509 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3510 struct iphdr *iph = skb->encapsulation ?
3511 inner_ip_hdr(skb) : ip_hdr(skb);
3512
3513 if (!(iph->frag_off & htons(IP_DF)))
3514 features &= ~NETIF_F_TSO_MANGLEID;
3515 }
3516
3517 return features;
3518}
3519
Florian Westphalc1e756b2014-05-05 15:00:44 +02003520netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00003521{
Jesse Gross5f352272014-12-23 22:37:26 -08003522 struct net_device *dev = skb->dev;
Eric Dumazetfcbeb972014-10-05 10:11:27 -07003523 netdev_features_t features = dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00003524
Alexander Duyckcbc53e02016-04-10 21:44:51 -04003525 if (skb_is_gso(skb))
3526 features = gso_features_check(skb, dev, features);
Ben Hutchings30b678d2012-07-30 15:57:00 +00003527
Jesse Gross5f352272014-12-23 22:37:26 -08003528 /* If encapsulation offload request, verify we are testing
3529 * hardware encapsulation features instead of standard
3530 * features for the netdev
3531 */
3532 if (skb->encapsulation)
3533 features &= dev->hw_enc_features;
3534
Toshiaki Makitaf5a7fb82015-03-27 14:31:11 +09003535 if (skb_vlan_tagged(skb))
3536 features = netdev_intersect_features(features,
3537 dev->vlan_features |
3538 NETIF_F_HW_VLAN_CTAG_TX |
3539 NETIF_F_HW_VLAN_STAG_TX);
Jesse Gross58e998c2010-10-29 12:14:55 +00003540
Jesse Gross5f352272014-12-23 22:37:26 -08003541 if (dev->netdev_ops->ndo_features_check)
3542 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3543 features);
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09003544 else
3545 features &= dflt_features_check(skb, dev, features);
Jesse Gross5f352272014-12-23 22:37:26 -08003546
Florian Westphalc1e756b2014-05-05 15:00:44 +02003547 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00003548}
Florian Westphalc1e756b2014-05-05 15:00:44 +02003549EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00003550
David S. Miller2ea25512014-08-29 21:10:01 -07003551static int xmit_one(struct sk_buff *skb, struct net_device *dev,
David S. Miller95f6b3d2014-08-29 21:57:30 -07003552 struct netdev_queue *txq, bool more)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003553{
David S. Miller2ea25512014-08-29 21:10:01 -07003554 unsigned int len;
3555 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08003556
Maciej W. Rozycki9f9a7422018-10-09 23:57:49 +01003557 if (dev_nit_active(dev))
David S. Miller2ea25512014-08-29 21:10:01 -07003558 dev_queue_xmit_nit(skb, dev);
Jesse Grossfc741212011-01-09 06:23:32 +00003559
David S. Miller2ea25512014-08-29 21:10:01 -07003560 len = skb->len;
3561 trace_net_dev_start_xmit(skb, dev);
David S. Miller95f6b3d2014-08-29 21:57:30 -07003562 rc = netdev_start_xmit(skb, dev, txq, more);
David S. Miller2ea25512014-08-29 21:10:01 -07003563 trace_net_dev_xmit(skb, rc, dev, len);
Eric Dumazetadf30902009-06-02 05:19:30 +00003564
Patrick McHardy572a9d72009-11-10 06:14:14 +00003565 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003566}
David S. Miller2ea25512014-08-29 21:10:01 -07003567
David S. Miller8dcda222014-09-01 15:06:40 -07003568struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3569 struct netdev_queue *txq, int *ret)
David S. Miller7f2e8702014-08-29 21:19:14 -07003570{
3571 struct sk_buff *skb = first;
3572 int rc = NETDEV_TX_OK;
3573
3574 while (skb) {
3575 struct sk_buff *next = skb->next;
3576
David S. Millera8305bf2018-07-29 20:42:53 -07003577 skb_mark_not_on_list(skb);
David S. Miller95f6b3d2014-08-29 21:57:30 -07003578 rc = xmit_one(skb, dev, txq, next != NULL);
David S. Miller7f2e8702014-08-29 21:19:14 -07003579 if (unlikely(!dev_xmit_complete(rc))) {
3580 skb->next = next;
3581 goto out;
3582 }
3583
3584 skb = next;
Eric Dumazetfe60faa2018-10-31 08:39:13 -07003585 if (netif_tx_queue_stopped(txq) && skb) {
David S. Miller7f2e8702014-08-29 21:19:14 -07003586 rc = NETDEV_TX_BUSY;
3587 break;
3588 }
3589 }
3590
3591out:
3592 *ret = rc;
3593 return skb;
3594}
3595
Eric Dumazet1ff0dc92014-10-06 11:26:27 -07003596static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3597 netdev_features_t features)
David S. Millereae3f882014-08-30 15:17:13 -07003598{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01003599 if (skb_vlan_tag_present(skb) &&
Jiri Pirko59682502014-11-19 14:04:59 +01003600 !vlan_hw_offload_capable(features, skb->vlan_proto))
3601 skb = __vlan_hwaccel_push_inside(skb);
David S. Millereae3f882014-08-30 15:17:13 -07003602 return skb;
3603}
3604
Davide Caratti43c26a12017-05-18 15:44:41 +02003605int skb_csum_hwoffload_help(struct sk_buff *skb,
3606 const netdev_features_t features)
3607{
3608 if (unlikely(skb->csum_not_inet))
3609 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3610 skb_crc32c_csum_help(skb);
3611
3612 return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3613}
3614EXPORT_SYMBOL(skb_csum_hwoffload_help);
3615
Steffen Klassertf53c7232017-12-20 10:41:36 +01003616static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
David S. Millereae3f882014-08-30 15:17:13 -07003617{
3618 netdev_features_t features;
3619
David S. Millereae3f882014-08-30 15:17:13 -07003620 features = netif_skb_features(skb);
3621 skb = validate_xmit_vlan(skb, features);
3622 if (unlikely(!skb))
3623 goto out_null;
3624
Ilya Lesokhinebf4e802018-04-30 10:16:12 +03003625 skb = sk_validate_xmit_skb(skb, dev);
3626 if (unlikely(!skb))
3627 goto out_null;
3628
Johannes Berg8b86a612015-04-17 15:45:04 +02003629 if (netif_needs_gso(skb, features)) {
David S. Millerce937182014-08-30 19:22:20 -07003630 struct sk_buff *segs;
3631
3632 segs = skb_gso_segment(skb, features);
Jason Wangcecda692014-09-19 16:04:38 +08003633 if (IS_ERR(segs)) {
Jason Wangaf6dabc2014-12-19 11:09:13 +08003634 goto out_kfree_skb;
Jason Wangcecda692014-09-19 16:04:38 +08003635 } else if (segs) {
3636 consume_skb(skb);
3637 skb = segs;
3638 }
David S. Millereae3f882014-08-30 15:17:13 -07003639 } else {
3640 if (skb_needs_linearize(skb, features) &&
3641 __skb_linearize(skb))
3642 goto out_kfree_skb;
3643
3644 /* If packet is not checksummed and device does not
3645 * support checksumming for this protocol, complete
3646 * checksumming here.
3647 */
3648 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3649 if (skb->encapsulation)
3650 skb_set_inner_transport_header(skb,
3651 skb_checksum_start_offset(skb));
3652 else
3653 skb_set_transport_header(skb,
3654 skb_checksum_start_offset(skb));
Davide Caratti43c26a12017-05-18 15:44:41 +02003655 if (skb_csum_hwoffload_help(skb, features))
David S. Millereae3f882014-08-30 15:17:13 -07003656 goto out_kfree_skb;
3657 }
3658 }
3659
Steffen Klassertf53c7232017-12-20 10:41:36 +01003660 skb = validate_xmit_xfrm(skb, features, again);
Steffen Klassert3dca3f32017-12-20 10:41:31 +01003661
David S. Millereae3f882014-08-30 15:17:13 -07003662 return skb;
3663
3664out_kfree_skb:
3665 kfree_skb(skb);
3666out_null:
Eric Dumazetd21fd632016-04-12 21:50:07 -07003667 atomic_long_inc(&dev->tx_dropped);
David S. Millereae3f882014-08-30 15:17:13 -07003668 return NULL;
3669}
3670
Steffen Klassertf53c7232017-12-20 10:41:36 +01003671struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
Eric Dumazet55a93b32014-10-03 15:31:07 -07003672{
3673 struct sk_buff *next, *head = NULL, *tail;
3674
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07003675 for (; skb != NULL; skb = next) {
Eric Dumazet55a93b32014-10-03 15:31:07 -07003676 next = skb->next;
David S. Millera8305bf2018-07-29 20:42:53 -07003677 skb_mark_not_on_list(skb);
Eric Dumazet55a93b32014-10-03 15:31:07 -07003678
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07003679		/* in case skb won't be segmented, point to itself */
3680 skb->prev = skb;
3681
Steffen Klassertf53c7232017-12-20 10:41:36 +01003682 skb = validate_xmit_skb(skb, dev, again);
Eric Dumazetbec3cfd2014-10-03 20:59:19 -07003683 if (!skb)
3684 continue;
3685
3686 if (!head)
3687 head = skb;
3688 else
3689 tail->next = skb;
3690 /* If skb was segmented, skb->prev points to
3691 * the last segment. If not, it still contains skb.
3692 */
3693 tail = skb->prev;
Eric Dumazet55a93b32014-10-03 15:31:07 -07003694 }
3695 return head;
3696}
Willem de Bruijn104ba782016-10-26 11:23:07 -04003697EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07003698
Eric Dumazet1def9232013-01-10 12:36:42 +00003699static void qdisc_pkt_len_init(struct sk_buff *skb)
3700{
3701 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3702
3703 qdisc_skb_cb(skb)->pkt_len = skb->len;
3704
3705 /* To get more precise estimation of bytes sent on wire,
3706	 * we add to pkt_len the header size of all segments
3707 */
Maxim Mikityanskiya0dce872019-02-22 12:55:22 +00003708 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08003709 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00003710 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00003711
Eric Dumazet757b8b12013-01-15 21:14:21 -08003712 /* mac layer + network layer */
3713 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3714
3715 /* + transport layer */
Eric Dumazet7c68d1a2018-01-18 19:59:19 -08003716 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3717 const struct tcphdr *th;
3718 struct tcphdr _tcphdr;
3719
3720 th = skb_header_pointer(skb, skb_transport_offset(skb),
3721 sizeof(_tcphdr), &_tcphdr);
3722 if (likely(th))
3723 hdr_len += __tcp_hdrlen(th);
3724 } else {
3725 struct udphdr _udphdr;
3726
3727 if (skb_header_pointer(skb, skb_transport_offset(skb),
3728 sizeof(_udphdr), &_udphdr))
3729 hdr_len += sizeof(struct udphdr);
3730 }
Jason Wang15e5a032013-03-25 20:19:59 +00003731
3732 if (shinfo->gso_type & SKB_GSO_DODGY)
3733 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3734 shinfo->gso_size);
3735
3736 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00003737 }
3738}
3739
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003740static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3741 struct net_device *dev,
3742 struct netdev_queue *txq)
3743{
3744 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazet520ac302016-06-21 23:16:49 -07003745 struct sk_buff *to_free = NULL;
Eric Dumazeta2da5702011-01-20 03:48:19 +00003746 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003747 int rc;
3748
Eric Dumazeta2da5702011-01-20 03:48:19 +00003749 qdisc_calculate_pkt_len(skb, q);
John Fastabend6b3ba912017-12-07 09:54:25 -08003750
3751 if (q->flags & TCQ_F_NOLOCK) {
Petr Machataaebe4422020-06-27 01:45:25 +03003752 rc = q->enqueue(skb, q, NULL, &to_free) & NET_XMIT_MASK;
Paolo Abeni379349e2020-02-18 18:15:44 +01003753 qdisc_run(q);
John Fastabend6b3ba912017-12-07 09:54:25 -08003754
3755 if (unlikely(to_free))
3756 kfree_skb_list(to_free);
3757 return rc;
3758 }
3759
Eric Dumazet79640a42010-06-02 05:09:29 -07003760 /*
3761 * Heuristic to force contended enqueues to serialize on a
3762 * separate lock before trying to get qdisc main lock.
Eric Dumazetf9eb8ae2016-06-06 09:37:15 -07003763 * This permits qdisc->running owner to get the lock more
Ying Xue9bf2b8c2014-06-26 15:56:31 +08003764 * often and dequeue packets faster.
Eric Dumazet79640a42010-06-02 05:09:29 -07003765 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00003766 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003767 if (unlikely(contended))
3768 spin_lock(&q->busylock);
3769
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003770 spin_lock(root_lock);
3771 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
Eric Dumazet520ac302016-06-21 23:16:49 -07003772 __qdisc_drop(skb, &to_free);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003773 rc = NET_XMIT_DROP;
3774 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07003775 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003776 /*
3777 * This is a work-conserving queue; there are no old skbs
3778 * waiting to be sent out; and the qdisc is not running -
3779 * xmit the skb directly.
3780 */
Eric Dumazetbfe0d022011-01-09 08:30:54 +00003781
Eric Dumazetbfe0d022011-01-09 08:30:54 +00003782 qdisc_bstats_update(q, skb);
3783
Eric Dumazet55a93b32014-10-03 15:31:07 -07003784 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
Eric Dumazet79640a42010-06-02 05:09:29 -07003785 if (unlikely(contended)) {
3786 spin_unlock(&q->busylock);
3787 contended = false;
3788 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003789 __qdisc_run(q);
John Fastabend6c148182017-12-07 09:54:06 -08003790 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003791
John Fastabend6c148182017-12-07 09:54:06 -08003792 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003793 rc = NET_XMIT_SUCCESS;
3794 } else {
Petr Machataaebe4422020-06-27 01:45:25 +03003795 rc = q->enqueue(skb, q, root_lock, &to_free) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07003796 if (qdisc_run_begin(q)) {
3797 if (unlikely(contended)) {
3798 spin_unlock(&q->busylock);
3799 contended = false;
3800 }
3801 __qdisc_run(q);
John Fastabend6c148182017-12-07 09:54:06 -08003802 qdisc_run_end(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07003803 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003804 }
3805 spin_unlock(root_lock);
Eric Dumazet520ac302016-06-21 23:16:49 -07003806 if (unlikely(to_free))
3807 kfree_skb_list(to_free);
Eric Dumazet79640a42010-06-02 05:09:29 -07003808 if (unlikely(contended))
3809 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00003810 return rc;
3811}
3812
Daniel Borkmann86f85152013-12-29 17:27:11 +01003813#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00003814static void skb_update_prio(struct sk_buff *skb)
3815{
Eric Dumazet4dcb31d2018-03-14 09:04:16 -07003816 const struct netprio_map *map;
3817 const struct sock *sk;
3818 unsigned int prioidx;
Neil Horman5bc14212011-11-22 05:10:51 +00003819
Eric Dumazet4dcb31d2018-03-14 09:04:16 -07003820 if (skb->priority)
3821 return;
3822 map = rcu_dereference_bh(skb->dev->priomap);
3823 if (!map)
3824 return;
3825 sk = skb_to_full_sk(skb);
3826 if (!sk)
3827 return;
Eric Dumazet91c68ce2012-07-08 21:45:10 +00003828
Eric Dumazet4dcb31d2018-03-14 09:04:16 -07003829 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3830
3831 if (prioidx < map->priomap_len)
3832 skb->priority = map->priomap[prioidx];
Neil Horman5bc14212011-11-22 05:10:51 +00003833}
3834#else
3835#define skb_update_prio(skb)
3836#endif
3837
Dave Jonesd29f7492008-07-22 14:09:06 -07003838/**
Michel Machado95603e22012-06-12 10:16:35 +00003839 * dev_loopback_xmit - loop back @skb
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05003840 * @net: network namespace this loopback is happening in
 3841 * @sk: sk needed so this function can be used as a netfilter okfn
Michel Machado95603e22012-06-12 10:16:35 +00003842 * @skb: buffer to transmit
3843 */
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05003844int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
Michel Machado95603e22012-06-12 10:16:35 +00003845{
3846 skb_reset_mac_header(skb);
3847 __skb_pull(skb, skb_network_offset(skb));
3848 skb->pkt_type = PACKET_LOOPBACK;
3849 skb->ip_summed = CHECKSUM_UNNECESSARY;
3850 WARN_ON(!skb_dst(skb));
3851 skb_dst_force(skb);
3852 netif_rx_ni(skb);
3853 return 0;
3854}
3855EXPORT_SYMBOL(dev_loopback_xmit);
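/* Illustrative sketch (not part of this file): dev_loopback_xmit() is
 * meant to be used as a netfilter okfn, roughly as the IPv4 output path
 * does for locally looped multicast copies; "net", "sk" and "skb" below
 * are whatever the caller already holds.
 *
 *	NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 *		net, sk, skb, NULL, skb->dev,
 *		dev_loopback_xmit);
 */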
3856
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003857#ifdef CONFIG_NET_EGRESS
3858static struct sk_buff *
3859sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3860{
Jiri Pirko46209402017-11-03 11:46:25 +01003861 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003862 struct tcf_result cl_res;
3863
Jiri Pirko46209402017-11-03 11:46:25 +01003864 if (!miniq)
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003865 return skb;
3866
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05003867 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
Jiri Pirko46209402017-11-03 11:46:25 +01003868 mini_qdisc_bstats_cpu_update(miniq, skb);
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003869
Jiri Pirko46209402017-11-03 11:46:25 +01003870 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003871 case TC_ACT_OK:
3872 case TC_ACT_RECLASSIFY:
3873 skb->tc_index = TC_H_MIN(cl_res.classid);
3874 break;
3875 case TC_ACT_SHOT:
Jiri Pirko46209402017-11-03 11:46:25 +01003876 mini_qdisc_qstats_cpu_drop(miniq);
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003877 *ret = NET_XMIT_DROP;
Daniel Borkmann7e2c3ae2016-05-15 23:28:29 +02003878 kfree_skb(skb);
3879 return NULL;
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003880 case TC_ACT_STOLEN:
3881 case TC_ACT_QUEUED:
Jiri Pirkoe25ea212017-06-06 14:12:02 +02003882 case TC_ACT_TRAP:
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003883 *ret = NET_XMIT_SUCCESS;
Daniel Borkmann7e2c3ae2016-05-15 23:28:29 +02003884 consume_skb(skb);
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003885 return NULL;
3886 case TC_ACT_REDIRECT:
3887 /* No need to push/pop skb's mac_header here on egress! */
3888 skb_do_redirect(skb);
3889 *ret = NET_XMIT_SUCCESS;
3890 return NULL;
3891 default:
3892 break;
3893 }
Daniel Borkmann357b6cc2020-03-18 10:33:22 +01003894
Daniel Borkmann1f211a12016-01-07 22:29:47 +01003895 return skb;
3896}
3897#endif /* CONFIG_NET_EGRESS */
3898
Amritha Nambiarfc9bab22018-06-29 21:27:02 -07003899#ifdef CONFIG_XPS
3900static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3901 struct xps_dev_maps *dev_maps, unsigned int tci)
3902{
3903 struct xps_map *map;
3904 int queue_index = -1;
3905
3906 if (dev->num_tc) {
3907 tci *= dev->num_tc;
3908 tci += netdev_get_prio_tc_map(dev, skb->priority);
3909 }
3910
3911 map = rcu_dereference(dev_maps->attr_map[tci]);
3912 if (map) {
3913 if (map->len == 1)
3914 queue_index = map->queues[0];
3915 else
3916 queue_index = map->queues[reciprocal_scale(
3917 skb_get_hash(skb), map->len)];
3918 if (unlikely(queue_index >= dev->real_num_tx_queues))
3919 queue_index = -1;
3920 }
3921 return queue_index;
3922}
3923#endif
3924
Alexander Duyckeadec8772018-07-09 12:19:48 -04003925static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3926 struct sk_buff *skb)
Jiri Pirko638b2a62015-05-12 14:56:13 +02003927{
3928#ifdef CONFIG_XPS
3929 struct xps_dev_maps *dev_maps;
Amritha Nambiarfc9bab22018-06-29 21:27:02 -07003930 struct sock *sk = skb->sk;
Jiri Pirko638b2a62015-05-12 14:56:13 +02003931 int queue_index = -1;
3932
Amritha Nambiar04157462018-06-29 21:26:46 -07003933 if (!static_key_false(&xps_needed))
3934 return -1;
3935
Jiri Pirko638b2a62015-05-12 14:56:13 +02003936 rcu_read_lock();
Amritha Nambiarfc9bab22018-06-29 21:27:02 -07003937 if (!static_key_false(&xps_rxqs_needed))
3938 goto get_cpus_map;
3939
Alexander Duyckeadec8772018-07-09 12:19:48 -04003940 dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
Jiri Pirko638b2a62015-05-12 14:56:13 +02003941 if (dev_maps) {
Amritha Nambiarfc9bab22018-06-29 21:27:02 -07003942 int tci = sk_rx_queue_get(sk);
Alexander Duyck184c4492016-10-28 11:50:13 -04003943
Amritha Nambiarfc9bab22018-06-29 21:27:02 -07003944 if (tci >= 0 && tci < dev->num_rx_queues)
3945 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3946 tci);
3947 }
Alexander Duyck184c4492016-10-28 11:50:13 -04003948
Amritha Nambiarfc9bab22018-06-29 21:27:02 -07003949get_cpus_map:
3950 if (queue_index < 0) {
Alexander Duyckeadec8772018-07-09 12:19:48 -04003951 dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
Amritha Nambiarfc9bab22018-06-29 21:27:02 -07003952 if (dev_maps) {
3953 unsigned int tci = skb->sender_cpu - 1;
3954
3955 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3956 tci);
Jiri Pirko638b2a62015-05-12 14:56:13 +02003957 }
3958 }
3959 rcu_read_unlock();
3960
3961 return queue_index;
3962#else
3963 return -1;
3964#endif
3965}
3966
Alexander Duycka4ea8a32018-07-09 12:19:54 -04003967u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
Paolo Abenia350ecc2019-03-20 11:02:06 +01003968 struct net_device *sb_dev)
Alexander Duycka4ea8a32018-07-09 12:19:54 -04003969{
3970 return 0;
3971}
3972EXPORT_SYMBOL(dev_pick_tx_zero);
3973
3974u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
Paolo Abenia350ecc2019-03-20 11:02:06 +01003975 struct net_device *sb_dev)
Alexander Duycka4ea8a32018-07-09 12:19:54 -04003976{
3977 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
3978}
3979EXPORT_SYMBOL(dev_pick_tx_cpu_id);
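/* Illustrative sketch (not part of this file): a multiqueue driver that
 * simply wants per-CPU transmit queue selection can wire the helper above
 * into its ops; "my_start_xmit" and "my_netdev_ops" are hypothetical names.
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_start_xmit   = my_start_xmit,
 *		.ndo_select_queue = dev_pick_tx_cpu_id,
 *	};
 */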
3980
Paolo Abenib71b5832019-03-20 11:02:05 +01003981u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
3982 struct net_device *sb_dev)
Jiri Pirko638b2a62015-05-12 14:56:13 +02003983{
3984 struct sock *sk = skb->sk;
3985 int queue_index = sk_tx_queue_get(sk);
3986
Alexander Duyckeadec8772018-07-09 12:19:48 -04003987 sb_dev = sb_dev ? : dev;
3988
Jiri Pirko638b2a62015-05-12 14:56:13 +02003989 if (queue_index < 0 || skb->ooo_okay ||
3990 queue_index >= dev->real_num_tx_queues) {
Alexander Duyckeadec8772018-07-09 12:19:48 -04003991 int new_index = get_xps_queue(dev, sb_dev, skb);
tchardingf4563a72017-02-09 17:56:07 +11003992
Jiri Pirko638b2a62015-05-12 14:56:13 +02003993 if (new_index < 0)
Alexander Duyckeadec8772018-07-09 12:19:48 -04003994 new_index = skb_tx_hash(dev, sb_dev, skb);
Jiri Pirko638b2a62015-05-12 14:56:13 +02003995
3996 if (queue_index != new_index && sk &&
Eric Dumazet004a5d02015-10-04 21:08:10 -07003997 sk_fullsock(sk) &&
Jiri Pirko638b2a62015-05-12 14:56:13 +02003998 rcu_access_pointer(sk->sk_dst_cache))
3999 sk_tx_queue_set(sk, new_index);
4000
4001 queue_index = new_index;
4002 }
4003
4004 return queue_index;
4005}
Paolo Abenib71b5832019-03-20 11:02:05 +01004006EXPORT_SYMBOL(netdev_pick_tx);
Jiri Pirko638b2a62015-05-12 14:56:13 +02004007
Paolo Abeni4bd97d52019-03-20 11:02:04 +01004008struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4009 struct sk_buff *skb,
4010 struct net_device *sb_dev)
Jiri Pirko638b2a62015-05-12 14:56:13 +02004011{
4012 int queue_index = 0;
4013
4014#ifdef CONFIG_XPS
Eric Dumazet52bd2d62015-11-18 06:30:50 -08004015 u32 sender_cpu = skb->sender_cpu - 1;
4016
4017 if (sender_cpu >= (u32)NR_CPUS)
Jiri Pirko638b2a62015-05-12 14:56:13 +02004018 skb->sender_cpu = raw_smp_processor_id() + 1;
4019#endif
4020
4021 if (dev->real_num_tx_queues != 1) {
4022 const struct net_device_ops *ops = dev->netdev_ops;
tchardingf4563a72017-02-09 17:56:07 +11004023
Jiri Pirko638b2a62015-05-12 14:56:13 +02004024 if (ops->ndo_select_queue)
Paolo Abenia350ecc2019-03-20 11:02:06 +01004025 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
Jiri Pirko638b2a62015-05-12 14:56:13 +02004026 else
Paolo Abeni4bd97d52019-03-20 11:02:04 +01004027 queue_index = netdev_pick_tx(dev, skb, sb_dev);
Jiri Pirko638b2a62015-05-12 14:56:13 +02004028
Alexander Duyckd5845272017-11-22 10:57:41 -08004029 queue_index = netdev_cap_txqueue(dev, queue_index);
Jiri Pirko638b2a62015-05-12 14:56:13 +02004030 }
4031
4032 skb_set_queue_mapping(skb, queue_index);
4033 return netdev_get_tx_queue(dev, queue_index);
4034}
4035
Michel Machado95603e22012-06-12 10:16:35 +00004036/**
Jason Wang9d08dd32014-01-20 11:25:13 +08004037 * __dev_queue_xmit - transmit a buffer
Dave Jonesd29f7492008-07-22 14:09:06 -07004038 * @skb: buffer to transmit
Alexander Duyckeadec8772018-07-09 12:19:48 -04004039 * @sb_dev: subordinate device used for L2 forwarding offload
Dave Jonesd29f7492008-07-22 14:09:06 -07004040 *
4041 * Queue a buffer for transmission to a network device. The caller must
4042 * have set the device and priority and built the buffer before calling
4043 * this function. The function can be called from an interrupt.
4044 *
4045 * A negative errno code is returned on a failure. A success does not
4046 * guarantee the frame will be transmitted as it may be dropped due
4047 * to congestion or traffic shaping.
4048 *
4049 * -----------------------------------------------------------------------------------
 4050 * Note that this method can also return errors from the queue disciplines,
 4051 * including NET_XMIT_DROP, which is a positive value, so errors can also
 4052 * be positive.
4053 *
4054 * Regardless of the return value, the skb is consumed, so it is currently
4055 * difficult to retry a send to this method. (You can bump the ref count
4056 * before sending to hold a reference for retry if you are careful.)
4057 *
4058 * When calling this method, interrupts MUST be enabled. This is because
4059 * the BH enable code must have IRQs enabled so that it will not deadlock.
4060 * --BLG
4061 */
Alexander Duyckeadec8772018-07-09 12:19:48 -04004062static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004063{
4064 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07004065 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066 struct Qdisc *q;
4067 int rc = -ENOMEM;
Steffen Klassertf53c7232017-12-20 10:41:36 +01004068 bool again = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00004070 skb_reset_mac_header(skb);
4071
Willem de Bruijne7fd2882014-08-04 22:11:48 -04004072 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4073 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
4074
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004075 /* Disable soft irqs for various locks below. Also
4076 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004078 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079
Neil Horman5bc14212011-11-22 05:10:51 +00004080 skb_update_prio(skb);
4081
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004082 qdisc_pkt_len_init(skb);
4083#ifdef CONFIG_NET_CLS_ACT
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05004084 skb->tc_at_ingress = 0;
Daniel Borkmann357b6cc2020-03-18 10:33:22 +01004085# ifdef CONFIG_NET_EGRESS
Davidlohr Buesoaabf6772018-05-08 09:07:00 -07004086 if (static_branch_unlikely(&egress_needed_key)) {
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004087 skb = sch_handle_egress(skb, &rc, dev);
4088 if (!skb)
4089 goto out;
4090 }
Daniel Borkmann357b6cc2020-03-18 10:33:22 +01004091# endif
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004092#endif
Eric Dumazet02875872014-10-05 18:38:35 -07004093 /* If device/qdisc don't need skb->dst, release it right now while
 4094 * it's hot in this CPU cache.
4095 */
4096 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4097 skb_dst_drop(skb);
4098 else
4099 skb_dst_force(skb);
4100
Paolo Abeni4bd97d52019-03-20 11:02:04 +01004101 txq = netdev_core_pick_tx(dev, skb, sb_dev);
Paul E. McKenneya898def2010-02-22 17:04:49 -08004102 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07004103
Koki Sanagicf66ba52010-08-23 18:45:02 +09004104 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00004106 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07004107 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004108 }
4109
4110 /* The device has no queue. Common case for software devices:
tchardingeb13da12017-02-09 17:56:06 +11004111 * loopback, all sorts of tunnels...
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112
tchardingeb13da12017-02-09 17:56:06 +11004113 * Really, it is unlikely that netif_tx_lock protection is necessary
 4114 * here. (e.g. loopback and IP tunnels are clean, ignoring statistics
 4115 * counters.)
 4116 * However, it is possible that they rely on the protection
 4117 * made by us here.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118
tchardingeb13da12017-02-09 17:56:06 +11004119 * Check this and shoot the lock. It is not prone to deadlocks.
 4120 * Either shoot the noqueue qdisc; it is even simpler 8)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004121 */
4122 if (dev->flags & IFF_UP) {
4123 int cpu = smp_processor_id(); /* ok because BHs are off */
4124
David S. Millerc773e842008-07-08 23:13:53 -07004125 if (txq->xmit_lock_owner != cpu) {
Florian Westphal97cdcf32019-04-01 16:42:13 +02004126 if (dev_xmit_recursion())
Eric Dumazet745e20f2010-09-29 13:23:09 -07004127 goto recursion_alert;
4128
Steffen Klassertf53c7232017-12-20 10:41:36 +01004129 skb = validate_xmit_skb(skb, dev, &again);
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02004130 if (!skb)
Eric Dumazetd21fd632016-04-12 21:50:07 -07004131 goto out;
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02004132
David S. Millerc773e842008-07-08 23:13:53 -07004133 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004134
Tom Herbert734664982011-11-28 16:32:44 +00004135 if (!netif_xmit_stopped(txq)) {
Florian Westphal97cdcf32019-04-01 16:42:13 +02004136 dev_xmit_recursion_inc();
David S. Millerce937182014-08-30 19:22:20 -07004137 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
Florian Westphal97cdcf32019-04-01 16:42:13 +02004138 dev_xmit_recursion_dec();
Patrick McHardy572a9d72009-11-10 06:14:14 +00004139 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07004140 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004141 goto out;
4142 }
4143 }
David S. Millerc773e842008-07-08 23:13:53 -07004144 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00004145 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4146 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147 } else {
 4148 /* Recursion was detected! It can happen,
Eric Dumazet745e20f2010-09-29 13:23:09 -07004149 * unfortunately
4150 */
4151recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00004152 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4153 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154 }
4155 }
4156
4157 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07004158 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159
Eric Dumazet015f0682014-03-27 08:45:56 -07004160 atomic_long_inc(&dev->tx_dropped);
Jesper Dangaard Brouer1f595332014-09-03 17:56:09 +02004161 kfree_skb_list(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162 return rc;
4163out:
Herbert Xud4828d82006-06-22 02:28:18 -07004164 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165 return rc;
4166}
Jason Wangf663dd92014-01-10 16:18:26 +08004167
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05004168int dev_queue_xmit(struct sk_buff *skb)
Jason Wangf663dd92014-01-10 16:18:26 +08004169{
4170 return __dev_queue_xmit(skb, NULL);
4171}
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05004172EXPORT_SYMBOL(dev_queue_xmit);
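/* Illustrative sketch (not part of this file): a protocol or tunnel that
 * has built an skb typically transmits it like this; "dev", "daddr" and
 * the drop label are placeholders supplied by the caller.
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	if (dev_hard_header(skb, dev, ETH_P_IP, daddr,
 *			    dev->dev_addr, skb->len) < 0)
 *		goto drop;
 *	dev_queue_xmit(skb);	(the skb is consumed whatever the outcome)
 */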
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173
Alexander Duyckeadec8772018-07-09 12:19:48 -04004174int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
Jason Wangf663dd92014-01-10 16:18:26 +08004175{
Alexander Duyckeadec8772018-07-09 12:19:48 -04004176 return __dev_queue_xmit(skb, sb_dev);
Jason Wangf663dd92014-01-10 16:18:26 +08004177}
4178EXPORT_SYMBOL(dev_queue_xmit_accel);
4179
Magnus Karlsson865b03f2018-05-02 13:01:33 +02004180int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4181{
4182 struct net_device *dev = skb->dev;
4183 struct sk_buff *orig_skb = skb;
4184 struct netdev_queue *txq;
4185 int ret = NETDEV_TX_BUSY;
4186 bool again = false;
4187
4188 if (unlikely(!netif_running(dev) ||
4189 !netif_carrier_ok(dev)))
4190 goto drop;
4191
4192 skb = validate_xmit_skb_list(skb, dev, &again);
4193 if (skb != orig_skb)
4194 goto drop;
4195
4196 skb_set_queue_mapping(skb, queue_id);
4197 txq = skb_get_tx_queue(dev, skb);
4198
4199 local_bh_disable();
4200
Eric Dumazet0ad6f6e2020-06-17 22:23:25 -07004201 dev_xmit_recursion_inc();
Magnus Karlsson865b03f2018-05-02 13:01:33 +02004202 HARD_TX_LOCK(dev, txq, smp_processor_id());
4203 if (!netif_xmit_frozen_or_drv_stopped(txq))
4204 ret = netdev_start_xmit(skb, dev, txq, false);
4205 HARD_TX_UNLOCK(dev, txq);
Eric Dumazet0ad6f6e2020-06-17 22:23:25 -07004206 dev_xmit_recursion_dec();
Magnus Karlsson865b03f2018-05-02 13:01:33 +02004207
4208 local_bh_enable();
4209
4210 if (!dev_xmit_complete(ret))
4211 kfree_skb(skb);
4212
4213 return ret;
4214drop:
4215 atomic_long_inc(&dev->tx_dropped);
4216 kfree_skb_list(skb);
4217 return NET_XMIT_DROP;
4218}
4219EXPORT_SYMBOL(dev_direct_xmit);
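/* Illustrative sketch (not part of this file): an AF_XDP style generic
 * transmit path that has already validated its skb can push it straight
 * at one TX queue; "queue_id" is chosen by the caller.
 *
 *	err = dev_direct_xmit(skb, queue_id);
 *	if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY)
 *		(account the failure; the skb has already been freed)
 */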
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220
tchardingeb13da12017-02-09 17:56:06 +11004221/*************************************************************************
4222 * Receiver routines
4223 *************************************************************************/
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07004225int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00004226EXPORT_SYMBOL(netdev_max_backlog);
4227
Eric Dumazet3b098e22010-05-15 23:57:10 -07004228int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07004229int netdev_budget __read_mostly = 300;
Konstantin Khlebnikova48379802020-04-06 14:39:32 +03004230/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
4231unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01004232int weight_p __read_mostly = 64; /* old backlog weight */
4233int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
4234int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
4235int dev_rx_weight __read_mostly = 64;
4236int dev_tx_weight __read_mostly = 64;
Edward Cree323ebb62019-08-06 14:53:55 +01004237/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
4238int gro_normal_batch __read_mostly = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004240/* Called with irq disabled */
4241static inline void ____napi_schedule(struct softnet_data *sd,
4242 struct napi_struct *napi)
4243{
4244 list_add_tail(&napi->poll_list, &sd->poll_list);
4245 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4246}
4247
Eric Dumazetdf334542010-03-24 19:13:54 +00004248#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07004249
4250/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00004251struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07004252EXPORT_SYMBOL(rps_sock_flow_table);
Eric Dumazet567e4b72015-02-06 12:59:01 -08004253u32 rps_cpu_mask __read_mostly;
4254EXPORT_SYMBOL(rps_cpu_mask);
Tom Herbertfec5e652010-04-16 16:01:27 -07004255
Eric Dumazetdc053602019-03-22 08:56:38 -07004256struct static_key_false rps_needed __read_mostly;
Jason Wang3df97ba2016-04-25 23:13:42 -04004257EXPORT_SYMBOL(rps_needed);
Eric Dumazetdc053602019-03-22 08:56:38 -07004258struct static_key_false rfs_needed __read_mostly;
Eric Dumazet13bfff22016-12-07 08:29:10 -08004259EXPORT_SYMBOL(rfs_needed);
Eric Dumazetadc93002011-11-17 03:13:26 +00004260
Ben Hutchingsc4454772011-01-19 11:03:53 +00004261static struct rps_dev_flow *
4262set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4263 struct rps_dev_flow *rflow, u16 next_cpu)
4264{
Eric Dumazeta31196b2015-04-25 09:35:24 -07004265 if (next_cpu < nr_cpu_ids) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00004266#ifdef CONFIG_RFS_ACCEL
4267 struct netdev_rx_queue *rxqueue;
4268 struct rps_dev_flow_table *flow_table;
4269 struct rps_dev_flow *old_rflow;
4270 u32 flow_id;
4271 u16 rxq_index;
4272 int rc;
4273
4274 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00004275 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4276 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00004277 goto out;
4278 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4279 if (rxq_index == skb_get_rx_queue(skb))
4280 goto out;
4281
4282 rxqueue = dev->_rx + rxq_index;
4283 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4284 if (!flow_table)
4285 goto out;
Tom Herbert61b905d2014-03-24 15:34:47 -07004286 flow_id = skb_get_hash(skb) & flow_table->mask;
Ben Hutchingsc4454772011-01-19 11:03:53 +00004287 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4288 rxq_index, flow_id);
4289 if (rc < 0)
4290 goto out;
4291 old_rflow = rflow;
4292 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00004293 rflow->filter = rc;
4294 if (old_rflow->filter == rflow->filter)
4295 old_rflow->filter = RPS_NO_FILTER;
4296 out:
4297#endif
4298 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00004299 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00004300 }
4301
Ben Hutchings09994d12011-10-03 04:42:46 +00004302 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00004303 return rflow;
4304}
4305
Tom Herbert0a9627f2010-03-16 08:03:29 +00004306/*
4307 * get_rps_cpu is called from netif_receive_skb and returns the target
4308 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004309 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00004310 */
Tom Herbertfec5e652010-04-16 16:01:27 -07004311static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4312 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00004313{
Eric Dumazet567e4b72015-02-06 12:59:01 -08004314 const struct rps_sock_flow_table *sock_flow_table;
4315 struct netdev_rx_queue *rxqueue = dev->_rx;
Tom Herbertfec5e652010-04-16 16:01:27 -07004316 struct rps_dev_flow_table *flow_table;
Eric Dumazet567e4b72015-02-06 12:59:01 -08004317 struct rps_map *map;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004318 int cpu = -1;
Eric Dumazet567e4b72015-02-06 12:59:01 -08004319 u32 tcpu;
Tom Herbert61b905d2014-03-24 15:34:47 -07004320 u32 hash;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004321
Tom Herbert0a9627f2010-03-16 08:03:29 +00004322 if (skb_rx_queue_recorded(skb)) {
4323 u16 index = skb_get_rx_queue(skb);
Eric Dumazet567e4b72015-02-06 12:59:01 -08004324
Ben Hutchings62fe0b42010-09-27 08:24:33 +00004325 if (unlikely(index >= dev->real_num_rx_queues)) {
4326 WARN_ONCE(dev->real_num_rx_queues > 1,
4327 "%s received packet on queue %u, but number "
4328 "of RX queues is %u\n",
4329 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004330 goto done;
4331 }
Eric Dumazet567e4b72015-02-06 12:59:01 -08004332 rxqueue += index;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004333 }
4334
Eric Dumazet567e4b72015-02-06 12:59:01 -08004335 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4336
4337 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4338 map = rcu_dereference(rxqueue->rps_map);
4339 if (!flow_table && !map)
4340 goto done;
4341
Changli Gao2d47b452010-08-17 19:00:56 +00004342 skb_reset_network_header(skb);
Tom Herbert61b905d2014-03-24 15:34:47 -07004343 hash = skb_get_hash(skb);
4344 if (!hash)
Tom Herbert0a9627f2010-03-16 08:03:29 +00004345 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004346
Tom Herbertfec5e652010-04-16 16:01:27 -07004347 sock_flow_table = rcu_dereference(rps_sock_flow_table);
4348 if (flow_table && sock_flow_table) {
Tom Herbertfec5e652010-04-16 16:01:27 -07004349 struct rps_dev_flow *rflow;
Eric Dumazet567e4b72015-02-06 12:59:01 -08004350 u32 next_cpu;
4351 u32 ident;
Tom Herbertfec5e652010-04-16 16:01:27 -07004352
Eric Dumazet567e4b72015-02-06 12:59:01 -08004353 /* First check into global flow table if there is a match */
4354 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4355 if ((ident ^ hash) & ~rps_cpu_mask)
4356 goto try_rps;
4357
4358 next_cpu = ident & rps_cpu_mask;
4359
4360 /* OK, now we know there is a match,
4361 * we can look at the local (per receive queue) flow table
4362 */
Tom Herbert61b905d2014-03-24 15:34:47 -07004363 rflow = &flow_table->flows[hash & flow_table->mask];
Tom Herbertfec5e652010-04-16 16:01:27 -07004364 tcpu = rflow->cpu;
4365
Tom Herbertfec5e652010-04-16 16:01:27 -07004366 /*
4367 * If the desired CPU (where last recvmsg was done) is
4368 * different from current CPU (one in the rx-queue flow
4369 * table entry), switch if one of the following holds:
Eric Dumazeta31196b2015-04-25 09:35:24 -07004370 * - Current CPU is unset (>= nr_cpu_ids).
Tom Herbertfec5e652010-04-16 16:01:27 -07004371 * - Current CPU is offline.
4372 * - The current CPU's queue tail has advanced beyond the
4373 * last packet that was enqueued using this table entry.
4374 * This guarantees that all previous packets for the flow
4375 * have been dequeued, thus preserving in order delivery.
4376 */
4377 if (unlikely(tcpu != next_cpu) &&
Eric Dumazeta31196b2015-04-25 09:35:24 -07004378 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
Tom Herbertfec5e652010-04-16 16:01:27 -07004379 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00004380 rflow->last_qtail)) >= 0)) {
4381 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00004382 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00004383 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00004384
Eric Dumazeta31196b2015-04-25 09:35:24 -07004385 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07004386 *rflowp = rflow;
4387 cpu = tcpu;
4388 goto done;
4389 }
4390 }
4391
Eric Dumazet567e4b72015-02-06 12:59:01 -08004392try_rps:
4393
Tom Herbert0a9627f2010-03-16 08:03:29 +00004394 if (map) {
Daniel Borkmann8fc54f62014-08-23 20:58:54 +02004395 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
Tom Herbert0a9627f2010-03-16 08:03:29 +00004396 if (cpu_online(tcpu)) {
4397 cpu = tcpu;
4398 goto done;
4399 }
4400 }
4401
4402done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00004403 return cpu;
4404}
4405
Ben Hutchingsc4454772011-01-19 11:03:53 +00004406#ifdef CONFIG_RFS_ACCEL
4407
4408/**
4409 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4410 * @dev: Device on which the filter was set
4411 * @rxq_index: RX queue index
4412 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4413 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4414 *
4415 * Drivers that implement ndo_rx_flow_steer() should periodically call
4416 * this function for each installed filter and remove the filters for
4417 * which it returns %true.
4418 */
4419bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4420 u32 flow_id, u16 filter_id)
4421{
4422 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4423 struct rps_dev_flow_table *flow_table;
4424 struct rps_dev_flow *rflow;
4425 bool expire = true;
Eric Dumazeta31196b2015-04-25 09:35:24 -07004426 unsigned int cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00004427
4428 rcu_read_lock();
4429 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4430 if (flow_table && flow_id <= flow_table->mask) {
4431 rflow = &flow_table->flows[flow_id];
Mark Rutland6aa7de02017-10-23 14:07:29 -07004432 cpu = READ_ONCE(rflow->cpu);
Eric Dumazeta31196b2015-04-25 09:35:24 -07004433 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
Ben Hutchingsc4454772011-01-19 11:03:53 +00004434 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4435 rflow->last_qtail) <
4436 (int)(10 * flow_table->mask)))
4437 expire = false;
4438 }
4439 rcu_read_unlock();
4440 return expire;
4441}
4442EXPORT_SYMBOL(rps_may_expire_flow);
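/* Illustrative sketch (not part of this file): a driver implementing
 * ndo_rx_flow_steer() would scan its installed filters from periodic
 * work and let the core decide which ones may be removed; the "f" /
 * "priv" structures and my_hw_remove_filter() are hypothetical.
 *
 *	list_for_each_entry_safe(f, tmp, &priv->arfs_filters, list) {
 *		if (rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, f->filter_id)) {
 *			my_hw_remove_filter(priv, f);
 *			list_del(&f->list);
 *			kfree(f);
 *		}
 *	}
 */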
4443
4444#endif /* CONFIG_RFS_ACCEL */
4445
Tom Herbert0a9627f2010-03-16 08:03:29 +00004446/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004447static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00004448{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004449 struct softnet_data *sd = data;
4450
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004451 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00004452 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004453}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004454
Tom Herbertfec5e652010-04-16 16:01:27 -07004455#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00004456
4457/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004458 * Check if this softnet_data structure belongs to another CPU.
 4459 * If yes, queue it to our IPI list and return 1,
 4460 * otherwise return 0.
4461 */
4462static int rps_ipi_queued(struct softnet_data *sd)
4463{
4464#ifdef CONFIG_RPS
Christoph Lameter903ceff2014-08-17 12:30:35 -05004465 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004466
4467 if (sd != mysd) {
4468 sd->rps_ipi_next = mysd->rps_ipi_list;
4469 mysd->rps_ipi_list = sd;
4470
4471 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4472 return 1;
4473 }
4474#endif /* CONFIG_RPS */
4475 return 0;
4476}
4477
Willem de Bruijn99bbc702013-05-20 04:02:32 +00004478#ifdef CONFIG_NET_FLOW_LIMIT
4479int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4480#endif
4481
4482static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4483{
4484#ifdef CONFIG_NET_FLOW_LIMIT
4485 struct sd_flow_limit *fl;
4486 struct softnet_data *sd;
4487 unsigned int old_flow, new_flow;
4488
4489 if (qlen < (netdev_max_backlog >> 1))
4490 return false;
4491
Christoph Lameter903ceff2014-08-17 12:30:35 -05004492 sd = this_cpu_ptr(&softnet_data);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00004493
4494 rcu_read_lock();
4495 fl = rcu_dereference(sd->flow_limit);
4496 if (fl) {
Tom Herbert3958afa1b2013-12-15 22:12:06 -08004497 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00004498 old_flow = fl->history[fl->history_head];
4499 fl->history[fl->history_head] = new_flow;
4500
4501 fl->history_head++;
4502 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4503
4504 if (likely(fl->buckets[old_flow]))
4505 fl->buckets[old_flow]--;
4506
4507 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4508 fl->count++;
4509 rcu_read_unlock();
4510 return true;
4511 }
4512 }
4513 rcu_read_unlock();
4514#endif
4515 return false;
4516}
4517
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004518/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00004519 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4520 * queue (may be a remote CPU queue).
4521 */
Tom Herbertfec5e652010-04-16 16:01:27 -07004522static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4523 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00004524{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004525 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004526 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00004527 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00004528
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004529 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004530
4531 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004532
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004533 rps_lock(sd);
Julian Anastasove9e4dd32015-07-09 09:59:09 +03004534 if (!netif_running(skb->dev))
4535 goto drop;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00004536 qlen = skb_queue_len(&sd->input_pkt_queue);
4537 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Li RongQinge008f3f2014-12-08 09:42:55 +08004538 if (qlen) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00004539enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004540 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004541 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004542 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00004543 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004544 return NET_RX_SUCCESS;
4545 }
4546
Eric Dumazetebda37c22010-05-06 23:51:21 +00004547 /* Schedule NAPI for backlog device
 4548 * We can use a non-atomic operation since we own the queue lock
4549 */
4550 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004551 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004552 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004553 }
4554 goto enqueue;
4555 }
4556
Julian Anastasove9e4dd32015-07-09 09:59:09 +03004557drop:
Changli Gaodee42872010-05-02 05:42:16 +00004558 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00004559 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004560
Tom Herbert0a9627f2010-03-16 08:03:29 +00004561 local_irq_restore(flags);
4562
Eric Dumazetcaf586e2010-09-30 21:06:55 +00004563 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004564 kfree_skb(skb);
4565 return NET_RX_DROP;
4566}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004567
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01004568static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4569{
4570 struct net_device *dev = skb->dev;
4571 struct netdev_rx_queue *rxqueue;
4572
4573 rxqueue = dev->_rx;
4574
4575 if (skb_rx_queue_recorded(skb)) {
4576 u16 index = skb_get_rx_queue(skb);
4577
4578 if (unlikely(index >= dev->real_num_rx_queues)) {
4579 WARN_ONCE(dev->real_num_rx_queues > 1,
4580 "%s received packet on queue %u, but number "
4581 "of RX queues is %u\n",
4582 dev->name, index, dev->real_num_rx_queues);
4583
4584 return rxqueue; /* Return first rxqueue */
4585 }
4586 rxqueue += index;
4587 }
4588 return rxqueue;
4589}
4590
John Fastabendd4455162017-07-17 09:26:45 -07004591static u32 netif_receive_generic_xdp(struct sk_buff *skb,
Björn Töpel02671e22018-05-02 13:01:30 +02004592 struct xdp_buff *xdp,
John Fastabendd4455162017-07-17 09:26:45 -07004593 struct bpf_prog *xdp_prog)
4594{
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01004595 struct netdev_rx_queue *rxqueue;
Nikita V. Shirokov198d83b2018-04-17 21:42:14 -07004596 void *orig_data, *orig_data_end;
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004597 u32 metalen, act = XDP_DROP;
Jesper Dangaard Brouer29724952018-10-09 12:04:43 +02004598 __be16 orig_eth_type;
4599 struct ethhdr *eth;
4600 bool orig_bcast;
John Fastabendd4455162017-07-17 09:26:45 -07004601 int hlen, off;
4602 u32 mac_len;
4603
4604 /* Reinjected packets coming from act_mirred or similar should
4605 * not get XDP generic processing.
4606 */
Pablo Neira Ayuso2c646052020-03-25 13:47:18 +01004607 if (skb_is_redirected(skb))
John Fastabendd4455162017-07-17 09:26:45 -07004608 return XDP_PASS;
4609
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004610 /* XDP packets must be linear and must have sufficient headroom
 4611 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that native
 4612 * XDP also provides, thus we need to do it here as well.
4613 */
Toke Høiland-Jørgensenad1e03b2020-02-10 17:10:46 +01004614 if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004615 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4616 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4617 int troom = skb->tail + skb->data_len - skb->end;
4618
4619 /* In case we have to go down the path and also linearize,
 4620 * then let's do the pskb_expand_head() work just once here.
4621 */
4622 if (pskb_expand_head(skb,
4623 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4624 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4625 goto do_drop;
Song Liu2d17d8d2017-12-14 17:17:56 -08004626 if (skb_linearize(skb))
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004627 goto do_drop;
4628 }
John Fastabendd4455162017-07-17 09:26:45 -07004629
4630 /* The XDP program wants to see the packet starting at the MAC
4631 * header.
4632 */
4633 mac_len = skb->data - skb_mac_header(skb);
4634 hlen = skb_headlen(skb) + mac_len;
Björn Töpel02671e22018-05-02 13:01:30 +02004635 xdp->data = skb->data - mac_len;
4636 xdp->data_meta = xdp->data;
4637 xdp->data_end = xdp->data + hlen;
4638 xdp->data_hard_start = skb->data - skb_headroom(skb);
Jesper Dangaard Brouera0757672020-05-14 12:49:28 +02004639
 4640 /* SKB "head" area always has tailroom for skb_shared_info */
4641 xdp->frame_sz = (void *)skb_end_pointer(skb) - xdp->data_hard_start;
4642 xdp->frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4643
Björn Töpel02671e22018-05-02 13:01:30 +02004644 orig_data_end = xdp->data_end;
4645 orig_data = xdp->data;
Jesper Dangaard Brouer29724952018-10-09 12:04:43 +02004646 eth = (struct ethhdr *)xdp->data;
4647 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4648 orig_eth_type = eth->h_proto;
John Fastabendd4455162017-07-17 09:26:45 -07004649
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01004650 rxqueue = netif_get_rxqueue(skb);
Björn Töpel02671e22018-05-02 13:01:30 +02004651 xdp->rxq = &rxqueue->xdp_rxq;
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01004652
Björn Töpel02671e22018-05-02 13:01:30 +02004653 act = bpf_prog_run_xdp(xdp_prog, xdp);
John Fastabendd4455162017-07-17 09:26:45 -07004654
Jesper Dangaard Brouer065af352019-08-01 20:00:31 +02004655 /* check if bpf_xdp_adjust_head was used */
Björn Töpel02671e22018-05-02 13:01:30 +02004656 off = xdp->data - orig_data;
Jesper Dangaard Brouer065af352019-08-01 20:00:31 +02004657 if (off) {
4658 if (off > 0)
4659 __skb_pull(skb, off);
4660 else if (off < 0)
4661 __skb_push(skb, -off);
4662
4663 skb->mac_header += off;
4664 skb_reset_network_header(skb);
4665 }
John Fastabendd4455162017-07-17 09:26:45 -07004666
Jesper Dangaard Brouera0757672020-05-14 12:49:28 +02004667 /* check if bpf_xdp_adjust_tail was used */
4668 off = xdp->data_end - orig_data_end;
Nikita V. Shirokovf7613122018-04-25 07:15:03 -07004669 if (off != 0) {
Björn Töpel02671e22018-05-02 13:01:30 +02004670 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
Jesper Dangaard Brouera0757672020-05-14 12:49:28 +02004671 skb->len += off; /* positive on grow, negative on shrink */
Nikita V. Shirokovf7613122018-04-25 07:15:03 -07004672 }
Nikita V. Shirokov198d83b2018-04-17 21:42:14 -07004673
Jesper Dangaard Brouer29724952018-10-09 12:04:43 +02004674 /* check if XDP changed the eth hdr such that the SKB needs an update */
4675 eth = (struct ethhdr *)xdp->data;
4676 if ((orig_eth_type != eth->h_proto) ||
4677 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4678 __skb_push(skb, ETH_HLEN);
4679 skb->protocol = eth_type_trans(skb, skb->dev);
4680 }
4681
John Fastabendd4455162017-07-17 09:26:45 -07004682 switch (act) {
John Fastabend6103aa92017-07-17 09:27:50 -07004683 case XDP_REDIRECT:
John Fastabendd4455162017-07-17 09:26:45 -07004684 case XDP_TX:
4685 __skb_push(skb, mac_len);
John Fastabendd4455162017-07-17 09:26:45 -07004686 break;
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004687 case XDP_PASS:
Björn Töpel02671e22018-05-02 13:01:30 +02004688 metalen = xdp->data - xdp->data_meta;
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02004689 if (metalen)
4690 skb_metadata_set(skb, metalen);
4691 break;
John Fastabendd4455162017-07-17 09:26:45 -07004692 default:
4693 bpf_warn_invalid_xdp_action(act);
4694 /* fall through */
4695 case XDP_ABORTED:
4696 trace_xdp_exception(skb->dev, xdp_prog, act);
4697 /* fall through */
4698 case XDP_DROP:
4699 do_drop:
4700 kfree_skb(skb);
4701 break;
4702 }
4703
4704 return act;
4705}
4706
4707/* When doing generic XDP we have to bypass the qdisc layer and the
4708 * network taps in order to match in-driver-XDP behavior.
4709 */
Jason Wang7c497472017-08-11 19:41:17 +08004710void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
John Fastabendd4455162017-07-17 09:26:45 -07004711{
4712 struct net_device *dev = skb->dev;
4713 struct netdev_queue *txq;
4714 bool free_skb = true;
4715 int cpu, rc;
4716
Paolo Abeni4bd97d52019-03-20 11:02:04 +01004717 txq = netdev_core_pick_tx(dev, skb, NULL);
John Fastabendd4455162017-07-17 09:26:45 -07004718 cpu = smp_processor_id();
4719 HARD_TX_LOCK(dev, txq, cpu);
4720 if (!netif_xmit_stopped(txq)) {
4721 rc = netdev_start_xmit(skb, dev, txq, 0);
4722 if (dev_xmit_complete(rc))
4723 free_skb = false;
4724 }
4725 HARD_TX_UNLOCK(dev, txq);
4726 if (free_skb) {
4727 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4728 kfree_skb(skb);
4729 }
4730}
4731
Davidlohr Bueso02786472018-05-08 09:07:02 -07004732static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
John Fastabendd4455162017-07-17 09:26:45 -07004733
Jason Wang7c497472017-08-11 19:41:17 +08004734int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
John Fastabendd4455162017-07-17 09:26:45 -07004735{
John Fastabendd4455162017-07-17 09:26:45 -07004736 if (xdp_prog) {
Björn Töpel02671e22018-05-02 13:01:30 +02004737 struct xdp_buff xdp;
4738 u32 act;
John Fastabend6103aa92017-07-17 09:27:50 -07004739 int err;
John Fastabendd4455162017-07-17 09:26:45 -07004740
Björn Töpel02671e22018-05-02 13:01:30 +02004741 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
John Fastabendd4455162017-07-17 09:26:45 -07004742 if (act != XDP_PASS) {
John Fastabend6103aa92017-07-17 09:27:50 -07004743 switch (act) {
4744 case XDP_REDIRECT:
Jesper Dangaard Brouer2facaad2017-08-24 12:33:08 +02004745 err = xdp_do_generic_redirect(skb->dev, skb,
Björn Töpel02671e22018-05-02 13:01:30 +02004746 &xdp, xdp_prog);
John Fastabend6103aa92017-07-17 09:27:50 -07004747 if (err)
4748 goto out_redir;
Björn Töpel02671e22018-05-02 13:01:30 +02004749 break;
John Fastabend6103aa92017-07-17 09:27:50 -07004750 case XDP_TX:
John Fastabendd4455162017-07-17 09:26:45 -07004751 generic_xdp_tx(skb, xdp_prog);
John Fastabend6103aa92017-07-17 09:27:50 -07004752 break;
4753 }
John Fastabendd4455162017-07-17 09:26:45 -07004754 return XDP_DROP;
4755 }
4756 }
4757 return XDP_PASS;
John Fastabend6103aa92017-07-17 09:27:50 -07004758out_redir:
John Fastabend6103aa92017-07-17 09:27:50 -07004759 kfree_skb(skb);
4760 return XDP_DROP;
John Fastabendd4455162017-07-17 09:26:45 -07004761}
Jason Wang7c497472017-08-11 19:41:17 +08004762EXPORT_SYMBOL_GPL(do_xdp_generic);
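/* Illustrative sketch (not part of this file): a driver that runs generic
 * XDP on its own receive path (as tun does) calls this before handing the
 * skb to the stack; "priv->xdp_prog" is a hypothetical RCU pointer.
 *
 *	local_bh_disable();
 *	rcu_read_lock();
 *	xdp_prog = rcu_dereference(priv->xdp_prog);
 *	if (xdp_prog && do_xdp_generic(xdp_prog, skb) != XDP_PASS) {
 *		rcu_read_unlock();
 *		local_bh_enable();
 *		return;		(the skb was dropped, redirected or sent)
 *	}
 *	rcu_read_unlock();
 *	netif_receive_skb(skb);
 *	local_bh_enable();
 */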
John Fastabendd4455162017-07-17 09:26:45 -07004763
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004764static int netif_rx_internal(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004765{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004766 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004767
Eric Dumazet588f0332011-11-15 04:12:55 +00004768 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769
Koki Sanagicf66ba52010-08-23 18:45:02 +09004770 trace_netif_rx(skb);
John Fastabendd4455162017-07-17 09:26:45 -07004771
Eric Dumazetdf334542010-03-24 19:13:54 +00004772#ifdef CONFIG_RPS
Eric Dumazetdc053602019-03-22 08:56:38 -07004773 if (static_branch_unlikely(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07004774 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004775 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004776
Changli Gaocece1942010-08-07 20:35:43 -07004777 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004778 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07004779
4780 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004781 if (cpu < 0)
4782 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07004783
4784 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4785
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004786 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07004787 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00004788 } else
4789#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07004790 {
4791 unsigned int qtail;
tchardingf4563a72017-02-09 17:56:07 +11004792
Tom Herbertfec5e652010-04-16 16:01:27 -07004793 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4794 put_cpu();
4795 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07004796 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004797}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004798
4799/**
4800 * netif_rx - post buffer to the network code
4801 * @skb: buffer to post
4802 *
4803 * This function receives a packet from a device driver and queues it for
4804 * the upper (protocol) levels to process. It always succeeds. The buffer
4805 * may be dropped during processing for congestion control or by the
4806 * protocol layers.
4807 *
4808 * return values:
4809 * NET_RX_SUCCESS (no congestion)
4810 * NET_RX_DROP (packet was dropped)
4811 *
4812 */
4813
4814int netif_rx(struct sk_buff *skb)
4815{
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05004816 int ret;
4817
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004818 trace_netif_rx_entry(skb);
4819
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05004820 ret = netif_rx_internal(skb);
4821 trace_netif_rx_exit(ret);
4822
4823 return ret;
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004824}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004825EXPORT_SYMBOL(netif_rx);
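/* Illustrative sketch (not part of this file): a simple non-NAPI driver
 * feeds a received frame to the stack from its interrupt handler like
 * this; "my_dev", "rx_buf" and "pkt_len" are placeholders.
 *
 *	skb = netdev_alloc_skb_ip_align(my_dev, pkt_len);
 *	if (!skb) {
 *		my_dev->stats.rx_dropped++;
 *		return;
 *	}
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, my_dev);
 *	netif_rx(skb);
 */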
Linus Torvalds1da177e2005-04-16 15:20:36 -07004826
4827int netif_rx_ni(struct sk_buff *skb)
4828{
4829 int err;
4830
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004831 trace_netif_rx_ni_entry(skb);
4832
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833 preempt_disable();
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004834 err = netif_rx_internal(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004835 if (local_softirq_pending())
4836 do_softirq();
4837 preempt_enable();
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05004838 trace_netif_rx_ni_exit(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004839
4840 return err;
4841}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004842EXPORT_SYMBOL(netif_rx_ni);
4843
Emese Revfy0766f782016-06-20 20:42:34 +02004844static __latent_entropy void net_tx_action(struct softirq_action *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004845{
Christoph Lameter903ceff2014-08-17 12:30:35 -05004846 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004847
4848 if (sd->completion_queue) {
4849 struct sk_buff *clist;
4850
4851 local_irq_disable();
4852 clist = sd->completion_queue;
4853 sd->completion_queue = NULL;
4854 local_irq_enable();
4855
4856 while (clist) {
4857 struct sk_buff *skb = clist;
tchardingf4563a72017-02-09 17:56:07 +11004858
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859 clist = clist->next;
4860
Reshetova, Elena63354792017-06-30 13:07:58 +03004861 WARN_ON(refcount_read(&skb->users));
Eric Dumazete6247022013-12-05 04:45:08 -08004862 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4863 trace_consume_skb(skb);
4864 else
4865 trace_kfree_skb(skb, net_tx_action);
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01004866
4867 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4868 __kfree_skb(skb);
4869 else
4870 __kfree_skb_defer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871 }
Jesper Dangaard Brouer15fad712016-02-08 13:15:04 +01004872
4873 __kfree_skb_flush();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004874 }
4875
4876 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07004877 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878
4879 local_irq_disable();
4880 head = sd->output_queue;
4881 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00004882 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004883 local_irq_enable();
4884
4885 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07004886 struct Qdisc *q = head;
John Fastabend6b3ba912017-12-07 09:54:25 -08004887 spinlock_t *root_lock = NULL;
David S. Miller37437bb2008-07-16 02:15:04 -07004888
Linus Torvalds1da177e2005-04-16 15:20:36 -07004889 head = head->next_sched;
4890
John Fastabend6b3ba912017-12-07 09:54:25 -08004891 if (!(q->flags & TCQ_F_NOLOCK)) {
4892 root_lock = qdisc_lock(q);
4893 spin_lock(root_lock);
4894 }
Eric Dumazet3bcb8462016-06-04 20:02:28 -07004895 /* We need to make sure head->next_sched is read
4896 * before clearing __QDISC_STATE_SCHED
4897 */
4898 smp_mb__before_atomic();
4899 clear_bit(__QDISC_STATE_SCHED, &q->state);
4900 qdisc_run(q);
John Fastabend6b3ba912017-12-07 09:54:25 -08004901 if (root_lock)
4902 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903 }
4904 }
Steffen Klassertf53c7232017-12-20 10:41:36 +01004905
4906 xfrm_dev_backlog(sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004907}
4908
Javier Martinez Canillas181402a2016-09-09 08:43:15 -04004909#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
Michał Mirosławda678292009-06-05 05:35:28 +00004910/* This hook is defined here for ATM LANE */
4911int (*br_fdb_test_addr_hook)(struct net_device *dev,
4912 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07004913EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00004914#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004915
Daniel Borkmann1f211a12016-01-07 22:29:47 +01004916static inline struct sk_buff *
4917sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4918 struct net_device *orig_dev)
Herbert Xuf697c3e2007-10-14 00:38:47 -07004919{
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004920#ifdef CONFIG_NET_CLS_ACT
Jiri Pirko46209402017-11-03 11:46:25 +01004921 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004922 struct tcf_result cl_res;
Eric Dumazet24824a02010-10-02 06:11:55 +00004923
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02004924 /* If there's at least one ingress present somewhere (so
 4925 * we get here via an enabled static key), remaining devices
4926 * that are not configured with an ingress qdisc will bail
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004927 * out here.
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02004928 */
Jiri Pirko46209402017-11-03 11:46:25 +01004929 if (!miniq)
Daniel Borkmann45771392015-04-10 23:07:54 +02004930 return skb;
Jiri Pirko46209402017-11-03 11:46:25 +01004931
Herbert Xuf697c3e2007-10-14 00:38:47 -07004932 if (*pt_prev) {
4933 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4934 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07004935 }
4936
Florian Westphal33654952015-05-14 00:36:28 +02004937 qdisc_skb_cb(skb)->pkt_len = skb->len;
Willem de Bruijn8dc07fd2017-01-07 17:06:37 -05004938 skb->tc_at_ingress = 1;
Jiri Pirko46209402017-11-03 11:46:25 +01004939 mini_qdisc_bstats_cpu_update(miniq, skb);
Daniel Borkmannc9e99fd2015-05-09 22:51:31 +02004940
Paul Blakey7d17c542020-02-16 12:01:22 +02004941 switch (tcf_classify_ingress(skb, miniq->block, miniq->filter_list,
4942 &cl_res, false)) {
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004943 case TC_ACT_OK:
4944 case TC_ACT_RECLASSIFY:
4945 skb->tc_index = TC_H_MIN(cl_res.classid);
4946 break;
4947 case TC_ACT_SHOT:
Jiri Pirko46209402017-11-03 11:46:25 +01004948 mini_qdisc_qstats_cpu_drop(miniq);
Eric Dumazet8a3a4c62016-05-06 15:55:50 -07004949 kfree_skb(skb);
4950 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004951 case TC_ACT_STOLEN:
4952 case TC_ACT_QUEUED:
Jiri Pirkoe25ea212017-06-06 14:12:02 +02004953 case TC_ACT_TRAP:
Eric Dumazet8a3a4c62016-05-06 15:55:50 -07004954 consume_skb(skb);
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004955 return NULL;
Alexei Starovoitov27b29f62015-09-15 23:05:43 -07004956 case TC_ACT_REDIRECT:
4957 /* skb_mac_header check was done by cls/act_bpf, so
4958 * we can safely push the L2 header back before
4959 * redirecting to another netdev
4960 */
4961 __skb_push(skb, skb->mac_len);
4962 skb_do_redirect(skb);
4963 return NULL;
John Hurley720f22f2019-06-24 23:13:35 +01004964 case TC_ACT_CONSUMED:
Paolo Abenicd11b1642018-07-30 14:30:44 +02004965 return NULL;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02004966 default:
4967 break;
Herbert Xuf697c3e2007-10-14 00:38:47 -07004968 }
Daniel Borkmanne7582ba2015-05-19 22:33:25 +02004969#endif /* CONFIG_NET_CLS_ACT */
Herbert Xuf697c3e2007-10-14 00:38:47 -07004970 return skb;
4971}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004973/**
Mahesh Bandewar24b27fc2016-09-01 22:18:34 -07004974 * netdev_is_rx_handler_busy - check if receive handler is registered
4975 * @dev: device to check
4976 *
4977 * Check if a receive handler is already registered for a given device.
4978 * Return true if there one.
4979 *
4980 * The caller must hold the rtnl_mutex.
4981 */
4982bool netdev_is_rx_handler_busy(struct net_device *dev)
4983{
4984 ASSERT_RTNL();
4985 return dev && rtnl_dereference(dev->rx_handler);
4986}
4987EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
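/* Illustrative sketch (not part of this file): an upper driver can refuse
 * to claim a lower device that already has a handler; "port_dev" is the
 * candidate lower device.
 *
 *	ASSERT_RTNL();
 *	if (netdev_is_rx_handler_busy(port_dev))
 *		return -EBUSY;
 */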
4988
4989/**
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004990 * netdev_rx_handler_register - register receive handler
4991 * @dev: device to register a handler for
4992 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00004993 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004994 *
Masanari Iidae2278672014-02-18 22:54:36 +09004995 * Register a receive handler for a device. This handler will then be
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00004996 * called from __netif_receive_skb. A negative errno code is returned
4997 * on a failure.
4998 *
4999 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00005000 *
5001 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00005002 */
5003int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00005004 rx_handler_func_t *rx_handler,
5005 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00005006{
Mahesh Bandewar1b7cd002017-01-18 15:02:49 -08005007 if (netdev_is_rx_handler_busy(dev))
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00005008 return -EBUSY;
5009
Paolo Abenif54262502018-03-09 10:39:24 +01005010 if (dev->priv_flags & IFF_NO_RX_HANDLER)
5011 return -EINVAL;
5012
Eric Dumazet00cfec32013-03-29 03:01:22 +00005013 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00005014 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00005015 rcu_assign_pointer(dev->rx_handler, rx_handler);
5016
5017 return 0;
5018}
5019EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
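
/* Illustrative sketch (not kernel API documentation): how a hypothetical
 * "myvirt" stacking driver might install an rx_handler.  The names
 * myvirt_handle_frame, myvirt_port and myvirt_frame_is_mine() are made up
 * for this sketch; netdev_rx_handler_register() and the RX_HANDLER_*
 * return codes are the real interface.
 *
 *	static rx_handler_result_t myvirt_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct myvirt_port *port;
 *
 *		port = rcu_dereference(skb->dev->rx_handler_data);
 *		if (!myvirt_frame_is_mine(port, skb))
 *			return RX_HANDLER_PASS;		// let the stack see it
 *
 *		skb->dev = port->upper_dev;		// steer to the upper device
 *		return RX_HANDLER_ANOTHER;		// re-run RX on the new dev
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(lower_dev, myvirt_handle_frame, port);
 *	rtnl_unlock();
 */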
5020
5021/**
5022 * netdev_rx_handler_unregister - unregister receive handler
5023 * @dev: device to unregister a handler from
5024 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00005025 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00005026 *
5027 * The caller must hold the rtnl_mutex.
5028 */
5029void netdev_rx_handler_unregister(struct net_device *dev)
5030{
5031
5032 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00005033 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00005034 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
 5035 * section is guaranteed to see a non-NULL rx_handler_data
5036 * as well.
5037 */
5038 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00005039 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00005040}
5041EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
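
/* Illustrative teardown sketch: once netdev_rx_handler_unregister() has
 * returned, freeing the rx_handler_data is safe because the function above
 * waits for RCU readers before clearing the data pointer.  lower_dev and
 * port are hypothetical names from the registration sketch above.
 *
 *	rtnl_lock();
 *	netdev_rx_handler_unregister(lower_dev);
 *	rtnl_unlock();
 *	kfree(port);		// no RX path can still reference it
 */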
5042
Mel Gormanb4b9e352012-07-31 16:44:26 -07005043/*
5044 * Limit the use of PFMEMALLOC reserves to those protocols that implement
5045 * the special handling of PFMEMALLOC skbs.
5046 */
5047static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5048{
5049 switch (skb->protocol) {
Joe Perches2b8837a2014-03-12 10:04:17 -07005050 case htons(ETH_P_ARP):
5051 case htons(ETH_P_IP):
5052 case htons(ETH_P_IPV6):
5053 case htons(ETH_P_8021Q):
5054 case htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07005055 return true;
5056 default:
5057 return false;
5058 }
5059}
5060
Pablo Neirae687ad62015-05-13 18:19:38 +02005061static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5062 int *ret, struct net_device *orig_dev)
5063{
5064 if (nf_hook_ingress_active(skb)) {
Aaron Conole2c1e2702016-09-21 11:35:03 -04005065 int ingress_retval;
5066
Pablo Neirae687ad62015-05-13 18:19:38 +02005067 if (*pt_prev) {
5068 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5069 *pt_prev = NULL;
5070 }
5071
Aaron Conole2c1e2702016-09-21 11:35:03 -04005072 rcu_read_lock();
5073 ingress_retval = nf_hook_ingress(skb);
5074 rcu_read_unlock();
5075 return ingress_retval;
Pablo Neirae687ad62015-05-13 18:19:38 +02005076 }
5077 return 0;
5078}
Pablo Neirae687ad62015-05-13 18:19:38 +02005079
Boris Sukholitkoc0bbbdc2020-05-19 10:32:37 +03005080static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
Edward Cree88eb1942018-07-02 16:13:56 +01005081 struct packet_type **ppt_prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005082{
5083 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00005084 rx_handler_func_t *rx_handler;
Boris Sukholitkoc0bbbdc2020-05-19 10:32:37 +03005085 struct sk_buff *skb = *pskb;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07005086 struct net_device *orig_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00005087 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005088 int ret = NET_RX_DROP;
Al Viro252e33462006-11-14 20:48:11 -08005089 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090
Eric Dumazet588f0332011-11-15 04:12:55 +00005091 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07005092
Koki Sanagicf66ba52010-08-23 18:45:02 +09005093 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08005094
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07005095 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00005096
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07005097 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00005098 if (!skb_transport_header_was_set(skb))
5099 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00005100 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005101
5102 pt_prev = NULL;
5103
David S. Miller63d8ea72011-02-28 10:48:59 -08005104another_round:
David S. Millerb6858172012-07-23 16:27:54 -07005105 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08005106
5107 __this_cpu_inc(softnet_data.processed);
5108
Stephen Hemminger458bf2f2019-05-28 11:47:31 -07005109 if (static_branch_unlikely(&generic_xdp_needed_key)) {
5110 int ret2;
5111
5112 preempt_disable();
5113 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5114 preempt_enable();
5115
Boris Sukholitkoc0bbbdc2020-05-19 10:32:37 +03005116 if (ret2 != XDP_PASS) {
5117 ret = NET_RX_DROP;
5118 goto out;
5119 }
Stephen Hemminger458bf2f2019-05-28 11:47:31 -07005120 skb_reset_mac_len(skb);
5121 }
5122
Patrick McHardy8ad227f2013-04-19 02:04:31 +00005123 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
5124 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Vlad Yasevich0d5501c2014-08-08 14:42:13 -04005125 skb = skb_vlan_untag(skb);
Jiri Pirkobcc6d472011-04-07 19:48:33 +00005126 if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03005127 goto out;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00005128 }
5129
Willem de Bruijne7246e12017-01-07 17:06:35 -05005130 if (skb_skip_tc_classify(skb))
5131 goto skip_classify;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005132
David S. Miller9754e292013-02-14 15:57:38 -05005133 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07005134 goto skip_taps;
5135
Linus Torvalds1da177e2005-04-16 15:20:36 -07005136 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Salam Noureddine7866a622015-01-27 11:35:48 -08005137 if (pt_prev)
5138 ret = deliver_skb(skb, pt_prev, orig_dev);
5139 pt_prev = ptype;
5140 }
5141
5142 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5143 if (pt_prev)
5144 ret = deliver_skb(skb, pt_prev, orig_dev);
5145 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146 }
5147
Mel Gormanb4b9e352012-07-31 16:44:26 -07005148skip_taps:
Pablo Neira1cf519002015-05-13 18:19:37 +02005149#ifdef CONFIG_NET_INGRESS
Davidlohr Buesoaabf6772018-05-08 09:07:00 -07005150 if (static_branch_unlikely(&ingress_needed_key)) {
Daniel Borkmann1f211a12016-01-07 22:29:47 +01005151 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
Daniel Borkmann45771392015-04-10 23:07:54 +02005152 if (!skb)
Julian Anastasov2c17d272015-07-09 09:59:10 +03005153 goto out;
Pablo Neirae687ad62015-05-13 18:19:38 +02005154
5155 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
Julian Anastasov2c17d272015-07-09 09:59:10 +03005156 goto out;
Daniel Borkmann45771392015-04-10 23:07:54 +02005157 }
Pablo Neira1cf519002015-05-13 18:19:37 +02005158#endif
Pablo Neira Ayuso2c646052020-03-25 13:47:18 +01005159 skb_reset_redirect(skb);
Willem de Bruijne7246e12017-01-07 17:06:35 -05005160skip_classify:
David S. Miller9754e292013-02-14 15:57:38 -05005161 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07005162 goto drop;
5163
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01005164 if (skb_vlan_tag_present(skb)) {
John Fastabend24257172011-10-10 09:16:41 +00005165 if (pt_prev) {
5166 ret = deliver_skb(skb, pt_prev, orig_dev);
5167 pt_prev = NULL;
5168 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00005169 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00005170 goto another_round;
5171 else if (unlikely(!skb))
Julian Anastasov2c17d272015-07-09 09:59:10 +03005172 goto out;
John Fastabend24257172011-10-10 09:16:41 +00005173 }
5174
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00005175 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00005176 if (rx_handler) {
5177 if (pt_prev) {
5178 ret = deliver_skb(skb, pt_prev, orig_dev);
5179 pt_prev = NULL;
5180 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00005181 switch (rx_handler(&skb)) {
5182 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00005183 ret = NET_RX_SUCCESS;
Julian Anastasov2c17d272015-07-09 09:59:10 +03005184 goto out;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00005185 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08005186 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00005187 case RX_HANDLER_EXACT:
5188 deliver_exact = true;
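		fallthrough;	/* RX_HANDLER_EXACT shares the PASS exit path */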
5189 case RX_HANDLER_PASS:
5190 break;
5191 default:
5192 BUG();
5193 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00005194 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005195
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01005196 if (unlikely(skb_vlan_tag_present(skb))) {
Govindarajulu Varadarajan36b2f612019-06-14 06:13:54 -07005197check_vlan_id:
5198 if (skb_vlan_tag_get_id(skb)) {
 5199 /* VLAN id is non-zero and vlan_do_receive() above could not
 5200 * find the vlan device.
5201 */
Eric Dumazetd4b812d2013-07-18 07:19:26 -07005202 skb->pkt_type = PACKET_OTHERHOST;
Govindarajulu Varadarajan36b2f612019-06-14 06:13:54 -07005203 } else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
5204 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
5205 /* Outer header is 802.1P with vlan 0, inner header is
5206 * 802.1Q or 802.1AD and vlan_do_receive() above could
5207 * not find vlan dev for vlan id 0.
5208 */
5209 __vlan_hwaccel_clear_tag(skb);
5210 skb = skb_vlan_untag(skb);
5211 if (unlikely(!skb))
5212 goto out;
5213 if (vlan_do_receive(&skb))
5214 /* After stripping off 802.1P header with vlan 0
5215 * vlan dev is found for inner header.
5216 */
5217 goto another_round;
5218 else if (unlikely(!skb))
5219 goto out;
5220 else
 5221 /* We have stripped the outer 802.1P vlan 0 header,
 5222 * but could not find a vlan dev.
 5223 * Check the vlan id again so OTHERHOST can be set.
5224 */
5225 goto check_vlan_id;
5226 }
Eric Dumazetd4b812d2013-07-18 07:19:26 -07005227 /* Note: we might in the future use prio bits
5228 * and set skb->priority like in vlan_do_receive()
5229 * For the time being, just ignore Priority Code Point
5230 */
Michał Mirosławb18175242018-11-09 00:18:02 +01005231 __vlan_hwaccel_clear_tag(skb);
Eric Dumazetd4b812d2013-07-18 07:19:26 -07005232 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00005233
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234 type = skb->protocol;
Salam Noureddine7866a622015-01-27 11:35:48 -08005235
5236 /* deliver only exact match when indicated */
5237 if (likely(!deliver_exact)) {
5238 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5239 &ptype_base[ntohs(type) &
5240 PTYPE_HASH_MASK]);
5241 }
5242
5243 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5244 &orig_dev->ptype_specific);
5245
5246 if (unlikely(skb->dev != orig_dev)) {
5247 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5248 &skb->dev->ptype_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249 }
5250
5251 if (pt_prev) {
Willem de Bruijn1f8b9772017-08-03 16:29:41 -04005252 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00005253 goto drop;
Edward Cree88eb1942018-07-02 16:13:56 +01005254 *ppt_prev = pt_prev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07005256drop:
Jarod Wilson6e7333d2016-02-01 18:51:05 -05005257 if (!deliver_exact)
5258 atomic_long_inc(&skb->dev->rx_dropped);
5259 else
5260 atomic_long_inc(&skb->dev->rx_nohandler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261 kfree_skb(skb);
 5262 /* Jamal, now you will not be able to escape explaining
 5263 * to me how you were going to use this. :-)
5264 */
5265 ret = NET_RX_DROP;
5266 }
5267
Julian Anastasov2c17d272015-07-09 09:59:10 +03005268out:
Boris Sukholitkoc0bbbdc2020-05-19 10:32:37 +03005269 /* The invariant here is that if *ppt_prev is not NULL
5270 * then skb should also be non-NULL.
5271 *
 5272 * The *ppt_prev assignment above maintains this invariant because
 5273 * skb is dereferenced right before it.
5274 */
5275 *pskb = skb;
David S. Miller9754e292013-02-14 15:57:38 -05005276 return ret;
5277}
5278
Edward Cree88eb1942018-07-02 16:13:56 +01005279static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5280{
5281 struct net_device *orig_dev = skb->dev;
5282 struct packet_type *pt_prev = NULL;
5283 int ret;
5284
Boris Sukholitkoc0bbbdc2020-05-19 10:32:37 +03005285 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
Edward Cree88eb1942018-07-02 16:13:56 +01005286 if (pt_prev)
Paolo Abenif5737cb2019-05-03 17:01:36 +02005287 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5288 skb->dev, pt_prev, orig_dev);
Edward Cree88eb1942018-07-02 16:13:56 +01005289 return ret;
5290}
5291
Jesper Dangaard Brouer1c601d82017-10-16 12:19:39 +02005292/**
5293 * netif_receive_skb_core - special purpose version of netif_receive_skb
5294 * @skb: buffer to process
5295 *
5296 * More direct receive version of netif_receive_skb(). It should
5297 * only be used by callers that have a need to skip RPS and Generic XDP.
Mauro Carvalho Chehab2de97802020-03-17 15:54:20 +01005298 * Caller must also take care of handling ``(page_is_)pfmemalloc`` skbs itself.
Jesper Dangaard Brouer1c601d82017-10-16 12:19:39 +02005299 *
5300 * This function may only be called from softirq context and interrupts
5301 * should be enabled.
5302 *
5303 * Return values (usually ignored):
5304 * NET_RX_SUCCESS: no congestion
5305 * NET_RX_DROP: packet was dropped
5306 */
5307int netif_receive_skb_core(struct sk_buff *skb)
5308{
5309 int ret;
5310
5311 rcu_read_lock();
Edward Cree88eb1942018-07-02 16:13:56 +01005312 ret = __netif_receive_skb_one_core(skb, false);
Jesper Dangaard Brouer1c601d82017-10-16 12:19:39 +02005313 rcu_read_unlock();
5314
5315 return ret;
5316}
5317EXPORT_SYMBOL(netif_receive_skb_core);
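
/* Illustrative sketch: a caller that deliberately bypasses RPS and generic
 * XDP could feed frames like this from softirq context.  my_hw_next_rx()
 * and priv are hypothetical; eth_type_trans() and netif_receive_skb_core()
 * are the real calls.
 *
 *	while ((skb = my_hw_next_rx(priv)) != NULL) {
 *		skb->protocol = eth_type_trans(skb, priv->netdev);
 *		netif_receive_skb_core(skb);
 *	}
 */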
5318
Edward Cree88eb1942018-07-02 16:13:56 +01005319static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5320 struct packet_type *pt_prev,
5321 struct net_device *orig_dev)
Edward Cree4ce00172018-07-02 16:13:40 +01005322{
5323 struct sk_buff *skb, *next;
5324
Edward Cree88eb1942018-07-02 16:13:56 +01005325 if (!pt_prev)
5326 return;
5327 if (list_empty(head))
5328 return;
Edward Cree17266ee2018-07-02 16:14:12 +01005329 if (pt_prev->list_func != NULL)
Paolo Abenifdf71422019-06-04 11:44:06 +02005330 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5331 ip_list_rcv, head, pt_prev, orig_dev);
Edward Cree17266ee2018-07-02 16:14:12 +01005332 else
Alexander Lobakin9a5a90d2019-03-28 18:23:04 +03005333 list_for_each_entry_safe(skb, next, head, list) {
5334 skb_list_del_init(skb);
Paolo Abenifdf71422019-06-04 11:44:06 +02005335 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Alexander Lobakin9a5a90d2019-03-28 18:23:04 +03005336 }
Edward Cree88eb1942018-07-02 16:13:56 +01005337}
5338
5339static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5340{
5341 /* Fast-path assumptions:
5342 * - There is no RX handler.
5343 * - Only one packet_type matches.
5344 * If either of these fails, we will end up doing some per-packet
5345 * processing in-line, then handling the 'last ptype' for the whole
5346 * sublist. This can't cause out-of-order delivery to any single ptype,
5347 * because the 'last ptype' must be constant across the sublist, and all
5348 * other ptypes are handled per-packet.
5349 */
5350 /* Current (common) ptype of sublist */
5351 struct packet_type *pt_curr = NULL;
5352 /* Current (common) orig_dev of sublist */
5353 struct net_device *od_curr = NULL;
5354 struct list_head sublist;
5355 struct sk_buff *skb, *next;
5356
Edward Cree9af86f932018-07-09 18:10:19 +01005357 INIT_LIST_HEAD(&sublist);
Edward Cree88eb1942018-07-02 16:13:56 +01005358 list_for_each_entry_safe(skb, next, head, list) {
5359 struct net_device *orig_dev = skb->dev;
5360 struct packet_type *pt_prev = NULL;
5361
Edward Cree22f6bbb2018-12-04 17:37:57 +00005362 skb_list_del_init(skb);
Boris Sukholitkoc0bbbdc2020-05-19 10:32:37 +03005363 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
Edward Cree9af86f932018-07-09 18:10:19 +01005364 if (!pt_prev)
5365 continue;
Edward Cree88eb1942018-07-02 16:13:56 +01005366 if (pt_curr != pt_prev || od_curr != orig_dev) {
5367 /* dispatch old sublist */
Edward Cree88eb1942018-07-02 16:13:56 +01005368 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5369 /* start new sublist */
Edward Cree9af86f932018-07-09 18:10:19 +01005370 INIT_LIST_HEAD(&sublist);
Edward Cree88eb1942018-07-02 16:13:56 +01005371 pt_curr = pt_prev;
5372 od_curr = orig_dev;
5373 }
Edward Cree9af86f932018-07-09 18:10:19 +01005374 list_add_tail(&skb->list, &sublist);
Edward Cree88eb1942018-07-02 16:13:56 +01005375 }
5376
5377 /* dispatch final sublist */
Edward Cree9af86f932018-07-09 18:10:19 +01005378 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
Edward Cree4ce00172018-07-02 16:13:40 +01005379}
5380
David S. Miller9754e292013-02-14 15:57:38 -05005381static int __netif_receive_skb(struct sk_buff *skb)
5382{
5383 int ret;
5384
5385 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
Vlastimil Babkaf1083042017-05-08 15:59:53 -07005386 unsigned int noreclaim_flag;
David S. Miller9754e292013-02-14 15:57:38 -05005387
5388 /*
5389 * PFMEMALLOC skbs are special, they should
5390 * - be delivered to SOCK_MEMALLOC sockets only
5391 * - stay away from userspace
5392 * - have bounded memory usage
5393 *
5394 * Use PF_MEMALLOC as this saves us from propagating the allocation
5395 * context down to all allocation sites.
5396 */
Vlastimil Babkaf1083042017-05-08 15:59:53 -07005397 noreclaim_flag = memalloc_noreclaim_save();
Edward Cree88eb1942018-07-02 16:13:56 +01005398 ret = __netif_receive_skb_one_core(skb, true);
Vlastimil Babkaf1083042017-05-08 15:59:53 -07005399 memalloc_noreclaim_restore(noreclaim_flag);
David S. Miller9754e292013-02-14 15:57:38 -05005400 } else
Edward Cree88eb1942018-07-02 16:13:56 +01005401 ret = __netif_receive_skb_one_core(skb, false);
David S. Miller9754e292013-02-14 15:57:38 -05005402
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403 return ret;
5404}
Tom Herbert0a9627f2010-03-16 08:03:29 +00005405
Edward Cree4ce00172018-07-02 16:13:40 +01005406static void __netif_receive_skb_list(struct list_head *head)
5407{
5408 unsigned long noreclaim_flag = 0;
5409 struct sk_buff *skb, *next;
5410 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5411
5412 list_for_each_entry_safe(skb, next, head, list) {
5413 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5414 struct list_head sublist;
5415
5416 /* Handle the previous sublist */
5417 list_cut_before(&sublist, head, &skb->list);
Edward Creeb9f463d2018-07-02 16:14:44 +01005418 if (!list_empty(&sublist))
5419 __netif_receive_skb_list_core(&sublist, pfmemalloc);
Edward Cree4ce00172018-07-02 16:13:40 +01005420 pfmemalloc = !pfmemalloc;
5421 /* See comments in __netif_receive_skb */
5422 if (pfmemalloc)
5423 noreclaim_flag = memalloc_noreclaim_save();
5424 else
5425 memalloc_noreclaim_restore(noreclaim_flag);
5426 }
5427 }
5428 /* Handle the remaining sublist */
Edward Creeb9f463d2018-07-02 16:14:44 +01005429 if (!list_empty(head))
5430 __netif_receive_skb_list_core(head, pfmemalloc);
Edward Cree4ce00172018-07-02 16:13:40 +01005431 /* Restore pflags */
5432 if (pfmemalloc)
5433 memalloc_noreclaim_restore(noreclaim_flag);
5434}
5435
Jakub Kicinskif4e63522017-11-03 13:56:16 -07005436static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
David S. Millerb5cdae32017-04-18 15:36:58 -04005437{
Martin KaFai Lau58038692017-06-15 17:29:09 -07005438 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
David S. Millerb5cdae32017-04-18 15:36:58 -04005439 struct bpf_prog *new = xdp->prog;
5440 int ret = 0;
5441
David Ahernfbee97f2020-05-29 16:07:13 -06005442 if (new) {
5443 u32 i;
5444
5445 /* generic XDP does not work with DEVMAPs that can
5446 * have a bpf_prog installed on an entry
5447 */
5448 for (i = 0; i < new->aux->used_map_cnt; i++) {
5449 if (dev_map_can_have_prog(new->aux->used_maps[i]))
5450 return -EINVAL;
Lorenzo Bianconi92164772020-07-14 15:56:38 +02005451 if (cpu_map_prog_allowed(new->aux->used_maps[i]))
5452 return -EINVAL;
David Ahernfbee97f2020-05-29 16:07:13 -06005453 }
5454 }
5455
David S. Millerb5cdae32017-04-18 15:36:58 -04005456 switch (xdp->command) {
Martin KaFai Lau58038692017-06-15 17:29:09 -07005457 case XDP_SETUP_PROG:
David S. Millerb5cdae32017-04-18 15:36:58 -04005458 rcu_assign_pointer(dev->xdp_prog, new);
5459 if (old)
5460 bpf_prog_put(old);
5461
5462 if (old && !new) {
Davidlohr Bueso02786472018-05-08 09:07:02 -07005463 static_branch_dec(&generic_xdp_needed_key);
David S. Millerb5cdae32017-04-18 15:36:58 -04005464 } else if (new && !old) {
Davidlohr Bueso02786472018-05-08 09:07:02 -07005465 static_branch_inc(&generic_xdp_needed_key);
David S. Millerb5cdae32017-04-18 15:36:58 -04005466 dev_disable_lro(dev);
Michael Chan56f5aa72017-12-16 03:09:41 -05005467 dev_disable_gro_hw(dev);
David S. Millerb5cdae32017-04-18 15:36:58 -04005468 }
5469 break;
David S. Millerb5cdae32017-04-18 15:36:58 -04005470
5471 case XDP_QUERY_PROG:
Martin KaFai Lau58038692017-06-15 17:29:09 -07005472 xdp->prog_id = old ? old->aux->id : 0;
David S. Millerb5cdae32017-04-18 15:36:58 -04005473 break;
5474
5475 default:
5476 ret = -EINVAL;
5477 break;
5478 }
5479
5480 return ret;
5481}
5482
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00005483static int netif_receive_skb_internal(struct sk_buff *skb)
Tom Herbert0a9627f2010-03-16 08:03:29 +00005484{
Julian Anastasov2c17d272015-07-09 09:59:10 +03005485 int ret;
5486
Eric Dumazet588f0332011-11-15 04:12:55 +00005487 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07005488
Richard Cochranc1f19b52010-07-17 08:49:36 +00005489 if (skb_defer_rx_timestamp(skb))
5490 return NET_RX_SUCCESS;
5491
John Fastabendbbbe2112017-09-08 14:00:30 -07005492 rcu_read_lock();
Eric Dumazetdf334542010-03-24 19:13:54 +00005493#ifdef CONFIG_RPS
Eric Dumazetdc053602019-03-22 08:56:38 -07005494 if (static_branch_unlikely(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07005495 struct rps_dev_flow voidflow, *rflow = &voidflow;
Julian Anastasov2c17d272015-07-09 09:59:10 +03005496 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07005497
Eric Dumazet3b098e22010-05-15 23:57:10 -07005498 if (cpu >= 0) {
5499 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5500 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00005501 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07005502 }
Tom Herbertfec5e652010-04-16 16:01:27 -07005503 }
Tom Herbert1e94d722010-03-18 17:45:44 -07005504#endif
Julian Anastasov2c17d272015-07-09 09:59:10 +03005505 ret = __netif_receive_skb(skb);
5506 rcu_read_unlock();
5507 return ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00005508}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00005509
Edward Cree7da517a2018-07-02 16:13:24 +01005510static void netif_receive_skb_list_internal(struct list_head *head)
5511{
Edward Cree7da517a2018-07-02 16:13:24 +01005512 struct sk_buff *skb, *next;
Edward Cree8c057ef2018-07-09 18:09:54 +01005513 struct list_head sublist;
Edward Cree7da517a2018-07-02 16:13:24 +01005514
Edward Cree8c057ef2018-07-09 18:09:54 +01005515 INIT_LIST_HEAD(&sublist);
Edward Cree7da517a2018-07-02 16:13:24 +01005516 list_for_each_entry_safe(skb, next, head, list) {
5517 net_timestamp_check(netdev_tstamp_prequeue, skb);
Edward Cree22f6bbb2018-12-04 17:37:57 +00005518 skb_list_del_init(skb);
Edward Cree8c057ef2018-07-09 18:09:54 +01005519 if (!skb_defer_rx_timestamp(skb))
5520 list_add_tail(&skb->list, &sublist);
Edward Cree7da517a2018-07-02 16:13:24 +01005521 }
Edward Cree8c057ef2018-07-09 18:09:54 +01005522 list_splice_init(&sublist, head);
Edward Cree7da517a2018-07-02 16:13:24 +01005523
Edward Cree7da517a2018-07-02 16:13:24 +01005524 rcu_read_lock();
5525#ifdef CONFIG_RPS
Eric Dumazetdc053602019-03-22 08:56:38 -07005526 if (static_branch_unlikely(&rps_needed)) {
Edward Cree7da517a2018-07-02 16:13:24 +01005527 list_for_each_entry_safe(skb, next, head, list) {
5528 struct rps_dev_flow voidflow, *rflow = &voidflow;
5529 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5530
5531 if (cpu >= 0) {
Edward Cree8c057ef2018-07-09 18:09:54 +01005532 /* Will be handled, remove from list */
Edward Cree22f6bbb2018-12-04 17:37:57 +00005533 skb_list_del_init(skb);
Edward Cree8c057ef2018-07-09 18:09:54 +01005534 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
Edward Cree7da517a2018-07-02 16:13:24 +01005535 }
5536 }
5537 }
5538#endif
5539 __netif_receive_skb_list(head);
5540 rcu_read_unlock();
5541}
5542
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00005543/**
5544 * netif_receive_skb - process receive buffer from network
5545 * @skb: buffer to process
5546 *
5547 * netif_receive_skb() is the main receive data processing function.
5548 * It always succeeds. The buffer may be dropped during processing
5549 * for congestion control or by the protocol layers.
5550 *
5551 * This function may only be called from softirq context and interrupts
5552 * should be enabled.
5553 *
5554 * Return values (usually ignored):
5555 * NET_RX_SUCCESS: no congestion
5556 * NET_RX_DROP: packet was dropped
5557 */
Eric W. Biederman04eb4482015-09-15 20:04:15 -05005558int netif_receive_skb(struct sk_buff *skb)
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00005559{
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05005560 int ret;
5561
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00005562 trace_netif_receive_skb_entry(skb);
5563
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05005564 ret = netif_receive_skb_internal(skb);
5565 trace_netif_receive_skb_exit(ret);
5566
5567 return ret;
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00005568}
Eric W. Biederman04eb4482015-09-15 20:04:15 -05005569EXPORT_SYMBOL(netif_receive_skb);
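
/* Illustrative sketch: the classic per-packet delivery done by a driver RX
 * routine in softirq context.  rx_buf, pkt_len and dev come from the
 * hypothetical hardware completion; netdev_alloc_skb_ip_align(),
 * skb_put_data(), eth_type_trans() and netif_receive_skb() are the real
 * kernel calls.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (!skb) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_receive_skb(skb);
 */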
Linus Torvalds1da177e2005-04-16 15:20:36 -07005570
Edward Creef6ad8c12018-07-02 16:12:45 +01005571/**
5572 * netif_receive_skb_list - process many receive buffers from network
5573 * @head: list of skbs to process.
5574 *
Edward Cree7da517a2018-07-02 16:13:24 +01005575 * Since the return value of netif_receive_skb() is normally ignored, and
5576 * wouldn't be meaningful for a list, this function returns void.
Edward Creef6ad8c12018-07-02 16:12:45 +01005577 *
5578 * This function may only be called from softirq context and interrupts
5579 * should be enabled.
5580 */
5581void netif_receive_skb_list(struct list_head *head)
5582{
Edward Cree7da517a2018-07-02 16:13:24 +01005583 struct sk_buff *skb;
Edward Creef6ad8c12018-07-02 16:12:45 +01005584
Edward Creeb9f463d2018-07-02 16:14:44 +01005585 if (list_empty(head))
5586 return;
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05005587 if (trace_netif_receive_skb_list_entry_enabled()) {
5588 list_for_each_entry(skb, head, list)
5589 trace_netif_receive_skb_list_entry(skb);
5590 }
Edward Cree7da517a2018-07-02 16:13:24 +01005591 netif_receive_skb_list_internal(head);
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05005592 trace_netif_receive_skb_list_exit(0);
Edward Creef6ad8c12018-07-02 16:12:45 +01005593}
5594EXPORT_SYMBOL(netif_receive_skb_list);
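
/* Illustrative sketch: batching completed frames onto a local list before a
 * single netif_receive_skb_list() call, as a driver poll routine might do.
 * mydrv_next_completed() is hypothetical; LIST_HEAD() and list_add_tail()
 * on skb->list are the real primitives used by existing callers.
 *
 *	LIST_HEAD(rx_list);
 *
 *	while ((skb = mydrv_next_completed(priv)) != NULL) {
 *		skb->protocol = eth_type_trans(skb, dev);
 *		list_add_tail(&skb->list, &rx_list);
 *	}
 *	netif_receive_skb_list(&rx_list);
 */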
5595
Wei Yongjunce1e2a72020-07-13 22:23:44 +08005596static DEFINE_PER_CPU(struct work_struct, flush_works);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005597
5598/* Network device is going away, flush any packets still pending */
5599static void flush_backlog(struct work_struct *work)
5600{
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005601 struct sk_buff *skb, *tmp;
5602 struct softnet_data *sd;
5603
5604 local_bh_disable();
5605 sd = this_cpu_ptr(&softnet_data);
5606
5607 local_irq_disable();
Eric Dumazete36fa2f2010-04-19 21:17:14 +00005608 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07005609 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Eric Dumazet41852492016-08-26 12:50:39 -07005610 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00005611 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005612 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00005613 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005614 }
Changli Gao6e7676c2010-04-27 15:07:33 -07005615 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00005616 rps_unlock(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005617 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07005618
5619 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
Eric Dumazet41852492016-08-26 12:50:39 -07005620 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
Changli Gao6e7676c2010-04-27 15:07:33 -07005621 __skb_unlink(skb, &sd->process_queue);
5622 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00005623 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07005624 }
5625 }
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005626 local_bh_enable();
5627}
5628
Eric Dumazet41852492016-08-26 12:50:39 -07005629static void flush_all_backlogs(void)
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005630{
5631 unsigned int cpu;
5632
5633 get_online_cpus();
5634
Eric Dumazet41852492016-08-26 12:50:39 -07005635 for_each_online_cpu(cpu)
5636 queue_work_on(cpu, system_highpri_wq,
5637 per_cpu_ptr(&flush_works, cpu));
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005638
5639 for_each_online_cpu(cpu)
Eric Dumazet41852492016-08-26 12:50:39 -07005640 flush_work(per_cpu_ptr(&flush_works, cpu));
Paolo Abeni145dd5f2016-08-25 15:58:44 +02005641
5642 put_online_cpus();
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005643}
5644
Maxim Mikityanskiyc8079432020-01-21 15:09:40 +00005645/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
5646static void gro_normal_list(struct napi_struct *napi)
5647{
5648 if (!napi->rx_count)
5649 return;
5650 netif_receive_skb_list_internal(&napi->rx_list);
5651 INIT_LIST_HEAD(&napi->rx_list);
5652 napi->rx_count = 0;
5653}
5654
5655/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
5656 * pass the whole batch up to the stack.
5657 */
5658static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
5659{
5660 list_add_tail(&skb->list, &napi->rx_list);
5661 if (++napi->rx_count >= gro_normal_batch)
5662 gro_normal_list(napi);
5663}
5664
Paolo Abeniaaa5d902018-12-14 11:51:58 +01005665INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
5666INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
Maxim Mikityanskiyc8079432020-01-21 15:09:40 +00005667static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08005668{
Vlad Yasevich22061d82012-11-15 08:49:11 +00005669 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08005670 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00005671 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08005672 int err = -ENOENT;
5673
Eric Dumazetc3c7c252012-12-06 13:54:59 +00005674 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5675
Herbert Xufc59f9a2009-04-14 15:11:06 -07005676 if (NAPI_GRO_CB(skb)->count == 1) {
5677 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08005678 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07005679 }
Herbert Xud565b0a2008-12-15 23:38:52 -08005680
5681 rcu_read_lock();
5682 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00005683 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08005684 continue;
5685
Paolo Abeniaaa5d902018-12-14 11:51:58 +01005686 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
5687 ipv6_gro_complete, inet_gro_complete,
5688 skb, 0);
Herbert Xud565b0a2008-12-15 23:38:52 -08005689 break;
5690 }
5691 rcu_read_unlock();
5692
5693 if (err) {
5694 WARN_ON(&ptype->list == head);
5695 kfree_skb(skb);
5696 return NET_RX_SUCCESS;
5697 }
5698
5699out:
Maxim Mikityanskiyc8079432020-01-21 15:09:40 +00005700 gro_normal_one(napi, skb);
5701 return NET_RX_SUCCESS;
Herbert Xud565b0a2008-12-15 23:38:52 -08005702}
5703
Li RongQing6312fe72018-07-05 14:34:32 +08005704static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
David Miller07d78362018-06-24 14:14:02 +09005705 bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08005706{
Li RongQing6312fe72018-07-05 14:34:32 +08005707 struct list_head *head = &napi->gro_hash[index].list;
David Millerd4546c22018-06-24 14:13:49 +09005708 struct sk_buff *skb, *p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005709
David Miller07d78362018-06-24 14:14:02 +09005710 list_for_each_entry_safe_reverse(skb, p, head, list) {
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00005711 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5712 return;
David S. Miller992cba72018-07-31 15:27:56 -07005713 skb_list_del_init(skb);
Maxim Mikityanskiyc8079432020-01-21 15:09:40 +00005714 napi_gro_complete(napi, skb);
Li RongQing6312fe72018-07-05 14:34:32 +08005715 napi->gro_hash[index].count--;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00005716 }
Li RongQingd9f37d02018-07-13 14:41:36 +08005717
5718 if (!napi->gro_hash[index].count)
5719 __clear_bit(index, &napi->gro_bitmask);
Herbert Xud565b0a2008-12-15 23:38:52 -08005720}
David Miller07d78362018-06-24 14:14:02 +09005721
Li RongQing6312fe72018-07-05 14:34:32 +08005722 /* napi->gro_hash[].list contains packets ordered by age,
David Miller07d78362018-06-24 14:14:02 +09005723 * youngest packets at the head of the list.
5724 * Complete skbs in reverse order to reduce latencies.
5725 */
5726void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5727{
Eric Dumazet42519ed2018-11-21 11:39:28 -08005728 unsigned long bitmask = napi->gro_bitmask;
5729 unsigned int i, base = ~0U;
David Miller07d78362018-06-24 14:14:02 +09005730
Eric Dumazet42519ed2018-11-21 11:39:28 -08005731 while ((i = ffs(bitmask)) != 0) {
5732 bitmask >>= i;
5733 base += i;
5734 __napi_gro_flush_chain(napi, base, flush_old);
Li RongQingd9f37d02018-07-13 14:41:36 +08005735 }
David Miller07d78362018-06-24 14:14:02 +09005736}
Eric Dumazet86cac582010-08-31 18:25:32 +00005737EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08005738
David Miller07d78362018-06-24 14:14:02 +09005739static struct list_head *gro_list_prepare(struct napi_struct *napi,
5740 struct sk_buff *skb)
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005741{
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005742 unsigned int maclen = skb->dev->hard_header_len;
Tom Herbert0b4cec82014-01-15 08:58:06 -08005743 u32 hash = skb_get_hash_raw(skb);
David Miller07d78362018-06-24 14:14:02 +09005744 struct list_head *head;
David Millerd4546c22018-06-24 14:13:49 +09005745 struct sk_buff *p;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005746
Li RongQing6312fe72018-07-05 14:34:32 +08005747 head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
David Miller07d78362018-06-24 14:14:02 +09005748 list_for_each_entry(p, head, list) {
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005749 unsigned long diffs;
5750
Tom Herbert0b4cec82014-01-15 08:58:06 -08005751 NAPI_GRO_CB(p)->flush = 0;
5752
5753 if (hash != skb_get_hash_raw(p)) {
5754 NAPI_GRO_CB(p)->same_flow = 0;
5755 continue;
5756 }
5757
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005758 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
Michał Mirosławb18175242018-11-09 00:18:02 +01005759 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
5760 if (skb_vlan_tag_present(p))
Tonghao Zhangfc5141c2019-11-22 20:38:01 +08005761 diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
Jesse Grossce87fc62016-01-20 17:59:49 -08005762 diffs |= skb_metadata_dst_cmp(p, skb);
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02005763 diffs |= skb_metadata_differs(p, skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005764 if (maclen == ETH_HLEN)
5765 diffs |= compare_ether_header(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07005766 skb_mac_header(skb));
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005767 else if (!diffs)
5768 diffs = memcmp(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07005769 skb_mac_header(skb),
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005770 maclen);
5771 NAPI_GRO_CB(p)->same_flow = !diffs;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005772 }
David Miller07d78362018-06-24 14:14:02 +09005773
5774 return head;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005775}
5776
Jerry Chu299603e82013-12-11 20:53:45 -08005777static void skb_gro_reset_offset(struct sk_buff *skb)
5778{
5779 const struct skb_shared_info *pinfo = skb_shinfo(skb);
5780 const skb_frag_t *frag0 = &pinfo->frags[0];
5781
5782 NAPI_GRO_CB(skb)->data_offset = 0;
5783 NAPI_GRO_CB(skb)->frag0 = NULL;
5784 NAPI_GRO_CB(skb)->frag0_len = 0;
5785
Alexander Lobakin8aef9982019-11-15 12:11:35 +03005786 if (!skb_headlen(skb) && pinfo->nr_frags &&
Jerry Chu299603e82013-12-11 20:53:45 -08005787 !PageHighMem(skb_frag_page(frag0))) {
5788 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
Eric Dumazet7cfd5fd2017-01-10 19:52:43 -08005789 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5790 skb_frag_size(frag0),
5791 skb->end - skb->tail);
Herbert Xud565b0a2008-12-15 23:38:52 -08005792 }
5793}
5794
Eric Dumazeta50e2332014-03-29 21:28:21 -07005795static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5796{
5797 struct skb_shared_info *pinfo = skb_shinfo(skb);
5798
5799 BUG_ON(skb->end - skb->tail < grow);
5800
5801 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
5802
5803 skb->data_len -= grow;
5804 skb->tail += grow;
5805
Jonathan Lemonb54c9d52019-07-30 07:40:33 -07005806 skb_frag_off_add(&pinfo->frags[0], grow);
Eric Dumazeta50e2332014-03-29 21:28:21 -07005807 skb_frag_size_sub(&pinfo->frags[0], grow);
5808
5809 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
5810 skb_frag_unref(skb, 0);
5811 memmove(pinfo->frags, pinfo->frags + 1,
5812 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
5813 }
5814}
5815
Maxim Mikityanskiyc8079432020-01-21 15:09:40 +00005816static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
David Miller07d78362018-06-24 14:14:02 +09005817{
Li RongQing6312fe72018-07-05 14:34:32 +08005818 struct sk_buff *oldest;
David Miller07d78362018-06-24 14:14:02 +09005819
Li RongQing6312fe72018-07-05 14:34:32 +08005820 oldest = list_last_entry(head, struct sk_buff, list);
David Miller07d78362018-06-24 14:14:02 +09005821
Li RongQing6312fe72018-07-05 14:34:32 +08005822 /* We are called with head length >= MAX_GRO_SKBS, so this is
David Miller07d78362018-06-24 14:14:02 +09005823 * impossible.
5824 */
5825 if (WARN_ON_ONCE(!oldest))
5826 return;
5827
Li RongQingd9f37d02018-07-13 14:41:36 +08005828 /* Do not adjust napi->gro_hash[].count, caller is adding a new
5829 * SKB to the chain.
David Miller07d78362018-06-24 14:14:02 +09005830 */
David S. Millerece23712018-10-28 10:35:12 -07005831 skb_list_del_init(oldest);
Maxim Mikityanskiyc8079432020-01-21 15:09:40 +00005832 napi_gro_complete(napi, oldest);
David Miller07d78362018-06-24 14:14:02 +09005833}
5834
Paolo Abeniaaa5d902018-12-14 11:51:58 +01005835INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
5836 struct sk_buff *));
5837INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
5838 struct sk_buff *));
Rami Rosenbb728822012-11-28 21:55:25 +00005839static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08005840{
Li RongQing6312fe72018-07-05 14:34:32 +08005841 u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
David Millerd4546c22018-06-24 14:13:49 +09005842 struct list_head *head = &offload_base;
Vlad Yasevich22061d82012-11-15 08:49:11 +00005843 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08005844 __be16 type = skb->protocol;
David Miller07d78362018-06-24 14:14:02 +09005845 struct list_head *gro_head;
David Millerd4546c22018-06-24 14:13:49 +09005846 struct sk_buff *pp = NULL;
Ben Hutchings5b252f02009-10-29 07:17:09 +00005847 enum gro_result ret;
David Millerd4546c22018-06-24 14:13:49 +09005848 int same_flow;
Eric Dumazeta50e2332014-03-29 21:28:21 -07005849 int grow;
Herbert Xud565b0a2008-12-15 23:38:52 -08005850
David S. Millerb5cdae32017-04-18 15:36:58 -04005851 if (netif_elide_gro(skb->dev))
Herbert Xud565b0a2008-12-15 23:38:52 -08005852 goto normal;
5853
David Miller07d78362018-06-24 14:14:02 +09005854 gro_head = gro_list_prepare(napi, skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00005855
Herbert Xud565b0a2008-12-15 23:38:52 -08005856 rcu_read_lock();
5857 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00005858 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08005859 continue;
5860
Herbert Xu86911732009-01-29 14:19:50 +00005861 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00005862 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08005863 NAPI_GRO_CB(skb)->same_flow = 0;
Eric Dumazetd61d0722016-11-07 11:12:27 -08005864 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
Herbert Xu5d38a072009-01-04 16:13:40 -08005865 NAPI_GRO_CB(skb)->free = 0;
Jesse Grossfac8e0f2016-03-19 09:32:01 -07005866 NAPI_GRO_CB(skb)->encap_mark = 0;
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02005867 NAPI_GRO_CB(skb)->recursion_counter = 0;
Alexander Duycka0ca1532016-04-05 09:13:39 -07005868 NAPI_GRO_CB(skb)->is_fou = 0;
Alexander Duyck15305452016-04-10 21:44:57 -04005869 NAPI_GRO_CB(skb)->is_atomic = 1;
Tom Herbert15e23962015-02-10 16:30:31 -08005870 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08005871
Tom Herbert662880f2014-08-27 21:26:56 -07005872 /* Setup for GRO checksum validation */
5873 switch (skb->ip_summed) {
5874 case CHECKSUM_COMPLETE:
5875 NAPI_GRO_CB(skb)->csum = skb->csum;
5876 NAPI_GRO_CB(skb)->csum_valid = 1;
5877 NAPI_GRO_CB(skb)->csum_cnt = 0;
5878 break;
5879 case CHECKSUM_UNNECESSARY:
5880 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
5881 NAPI_GRO_CB(skb)->csum_valid = 0;
5882 break;
5883 default:
5884 NAPI_GRO_CB(skb)->csum_cnt = 0;
5885 NAPI_GRO_CB(skb)->csum_valid = 0;
5886 }
Herbert Xud565b0a2008-12-15 23:38:52 -08005887
Paolo Abeniaaa5d902018-12-14 11:51:58 +01005888 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
5889 ipv6_gro_receive, inet_gro_receive,
5890 gro_head, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08005891 break;
5892 }
5893 rcu_read_unlock();
5894
5895 if (&ptype->list == head)
5896 goto normal;
5897
Masahiro Yamada45586c72020-02-03 17:37:45 -08005898 if (PTR_ERR(pp) == -EINPROGRESS) {
Steffen Klassert25393d32017-02-15 09:39:44 +01005899 ret = GRO_CONSUMED;
5900 goto ok;
5901 }
5902
Herbert Xu0da2afd52008-12-26 14:57:42 -08005903 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005904 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08005905
Herbert Xud565b0a2008-12-15 23:38:52 -08005906 if (pp) {
David S. Miller992cba72018-07-31 15:27:56 -07005907 skb_list_del_init(pp);
Maxim Mikityanskiyc8079432020-01-21 15:09:40 +00005908 napi_gro_complete(napi, pp);
Li RongQing6312fe72018-07-05 14:34:32 +08005909 napi->gro_hash[hash].count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08005910 }
5911
Herbert Xu0da2afd52008-12-26 14:57:42 -08005912 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08005913 goto ok;
5914
Eric Dumazet600adc12014-01-09 14:12:19 -08005915 if (NAPI_GRO_CB(skb)->flush)
Herbert Xud565b0a2008-12-15 23:38:52 -08005916 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08005917
Li RongQing6312fe72018-07-05 14:34:32 +08005918 if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
Maxim Mikityanskiyc8079432020-01-21 15:09:40 +00005919 gro_flush_oldest(napi, gro_head);
Eric Dumazet600adc12014-01-09 14:12:19 -08005920 } else {
Li RongQing6312fe72018-07-05 14:34:32 +08005921 napi->gro_hash[hash].count++;
Eric Dumazet600adc12014-01-09 14:12:19 -08005922 }
Herbert Xud565b0a2008-12-15 23:38:52 -08005923 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00005924 NAPI_GRO_CB(skb)->age = jiffies;
Eric Dumazet29e98242014-05-16 11:34:37 -07005925 NAPI_GRO_CB(skb)->last = skb;
Herbert Xu86911732009-01-29 14:19:50 +00005926 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
David Miller07d78362018-06-24 14:14:02 +09005927 list_add(&skb->list, gro_head);
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005928 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08005929
Herbert Xuad0f9902009-02-01 01:24:55 -08005930pull:
Eric Dumazeta50e2332014-03-29 21:28:21 -07005931 grow = skb_gro_offset(skb) - skb_headlen(skb);
5932 if (grow > 0)
5933 gro_pull_from_frag0(skb, grow);
Herbert Xud565b0a2008-12-15 23:38:52 -08005934ok:
Li RongQingd9f37d02018-07-13 14:41:36 +08005935 if (napi->gro_hash[hash].count) {
5936 if (!test_bit(hash, &napi->gro_bitmask))
5937 __set_bit(hash, &napi->gro_bitmask);
5938 } else if (test_bit(hash, &napi->gro_bitmask)) {
5939 __clear_bit(hash, &napi->gro_bitmask);
5940 }
5941
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005942 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08005943
5944normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08005945 ret = GRO_NORMAL;
5946 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08005947}
Herbert Xu96e93ea2009-01-06 10:49:34 -08005948
Jerry Chubf5a7552014-01-07 10:23:19 -08005949struct packet_offload *gro_find_receive_by_type(__be16 type)
5950{
5951 struct list_head *offload_head = &offload_base;
5952 struct packet_offload *ptype;
5953
5954 list_for_each_entry_rcu(ptype, offload_head, list) {
5955 if (ptype->type != type || !ptype->callbacks.gro_receive)
5956 continue;
5957 return ptype;
5958 }
5959 return NULL;
5960}
Or Gerlitze27a2f82014-01-20 13:59:20 +02005961EXPORT_SYMBOL(gro_find_receive_by_type);
Jerry Chubf5a7552014-01-07 10:23:19 -08005962
5963struct packet_offload *gro_find_complete_by_type(__be16 type)
5964{
5965 struct list_head *offload_head = &offload_base;
5966 struct packet_offload *ptype;
5967
5968 list_for_each_entry_rcu(ptype, offload_head, list) {
5969 if (ptype->type != type || !ptype->callbacks.gro_complete)
5970 continue;
5971 return ptype;
5972 }
5973 return NULL;
5974}
Or Gerlitze27a2f82014-01-20 13:59:20 +02005975EXPORT_SYMBOL(gro_find_complete_by_type);
Herbert Xu96e93ea2009-01-06 10:49:34 -08005976
Michal Kubečeke44699d2017-06-29 11:13:36 +02005977static void napi_skb_free_stolen_head(struct sk_buff *skb)
5978{
5979 skb_dst_drop(skb);
Florian Westphal174e2382019-09-26 20:37:05 +02005980 skb_ext_put(skb);
Michal Kubečeke44699d2017-06-29 11:13:36 +02005981 kmem_cache_free(skbuff_head_cache, skb);
5982}
5983
Alexander Lobakin6570bc72019-10-14 11:00:33 +03005984static gro_result_t napi_skb_finish(struct napi_struct *napi,
5985 struct sk_buff *skb,
5986 gro_result_t ret)
Herbert Xu5d38a072009-01-04 16:13:40 -08005987{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005988 switch (ret) {
5989 case GRO_NORMAL:
Alexander Lobakin6570bc72019-10-14 11:00:33 +03005990 gro_normal_one(napi, skb);
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07005991 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08005992
Herbert Xu5d0d9be2009-01-29 14:19:48 +00005993 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08005994 kfree_skb(skb);
5995 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00005996
Eric Dumazetdaa86542012-04-19 07:07:40 +00005997 case GRO_MERGED_FREE:
Michal Kubečeke44699d2017-06-29 11:13:36 +02005998 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5999 napi_skb_free_stolen_head(skb);
6000 else
Eric Dumazetd7e88832012-04-30 08:10:34 +00006001 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00006002 break;
6003
Ben Hutchings5b252f02009-10-29 07:17:09 +00006004 case GRO_HELD:
6005 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01006006 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00006007 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08006008 }
6009
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07006010 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00006011}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00006012
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07006013gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00006014{
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05006015 gro_result_t ret;
6016
Eric Dumazet93f93a42015-11-18 06:30:59 -08006017 skb_mark_napi_id(skb, napi);
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00006018 trace_napi_gro_receive_entry(skb);
Herbert Xu86911732009-01-29 14:19:50 +00006019
Eric Dumazeta50e2332014-03-29 21:28:21 -07006020 skb_gro_reset_offset(skb);
6021
Alexander Lobakin6570bc72019-10-14 11:00:33 +03006022 ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05006023 trace_napi_gro_receive_exit(ret);
6024
6025 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08006026}
6027EXPORT_SYMBOL(napi_gro_receive);
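
/* Illustrative sketch: the usual way a NAPI driver hands frames to GRO from
 * its poll callback.  mydrv_poll(), mydrv_rx_one() and struct mydrv_priv are
 * made up; napi_gro_receive() and napi_complete_done() are the real calls.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = mydrv_rx_one(priv)) != NULL) {
 *			skb->protocol = eth_type_trans(skb, priv->netdev);
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */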
6028
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00006029static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08006030{
Eric Dumazet93a35f52014-10-23 06:30:30 -07006031 if (unlikely(skb->pfmemalloc)) {
6032 consume_skb(skb);
6033 return;
6034 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08006035 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00006036 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
6037 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Michał Mirosławb18175242018-11-09 00:18:02 +01006038 __vlan_hwaccel_clear_tag(skb);
Herbert Xu66c46d72011-01-29 20:44:54 -08006039 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08006040 skb->skb_iif = 0;
Eric Dumazet33d9a2c2018-11-17 21:57:02 -08006041
6042 /* eth_type_trans() assumes pkt_type is PACKET_HOST */
6043 skb->pkt_type = PACKET_HOST;
6044
Jerry Chuc3caf112014-07-14 15:54:46 -07006045 skb->encapsulation = 0;
6046 skb_shinfo(skb)->gso_type = 0;
Eric Dumazete33d0ba2014-04-03 09:28:10 -07006047 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
Florian Westphal174e2382019-09-26 20:37:05 +02006048 skb_ext_reset(skb);
Herbert Xu96e93ea2009-01-06 10:49:34 -08006049
6050 napi->skb = skb;
6051}
Herbert Xu96e93ea2009-01-06 10:49:34 -08006052
Herbert Xu76620aa2009-04-16 02:02:07 -07006053struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08006054{
Herbert Xu5d38a072009-01-04 16:13:40 -08006055 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08006056
6057 if (!skb) {
Alexander Duyckfd11a832014-12-09 19:40:49 -08006058 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
Eric Dumazete2f9dc32015-11-19 12:11:23 -08006059 if (skb) {
6060 napi->skb = skb;
6061 skb_mark_napi_id(skb, napi);
6062 }
Herbert Xu5d38a072009-01-04 16:13:40 -08006063 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08006064 return skb;
6065}
Herbert Xu76620aa2009-04-16 02:02:07 -07006066EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08006067
Eric Dumazeta50e2332014-03-29 21:28:21 -07006068static gro_result_t napi_frags_finish(struct napi_struct *napi,
6069 struct sk_buff *skb,
6070 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00006071{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00006072 switch (ret) {
6073 case GRO_NORMAL:
Eric Dumazeta50e2332014-03-29 21:28:21 -07006074 case GRO_HELD:
6075 __skb_push(skb, ETH_HLEN);
6076 skb->protocol = eth_type_trans(skb, skb->dev);
Edward Cree323ebb62019-08-06 14:53:55 +01006077 if (ret == GRO_NORMAL)
6078 gro_normal_one(napi, skb);
Herbert Xu86911732009-01-29 14:19:50 +00006079 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00006080
6081 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00006082 napi_reuse_skb(napi, skb);
6083 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00006084
Michal Kubečeke44699d2017-06-29 11:13:36 +02006085 case GRO_MERGED_FREE:
6086 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
6087 napi_skb_free_stolen_head(skb);
6088 else
6089 napi_reuse_skb(napi, skb);
6090 break;
6091
Ben Hutchings5b252f02009-10-29 07:17:09 +00006092 case GRO_MERGED:
Steffen Klassert25393d32017-02-15 09:39:44 +01006093 case GRO_CONSUMED:
Ben Hutchings5b252f02009-10-29 07:17:09 +00006094 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00006095 }
6096
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07006097 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00006098}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00006099
Eric Dumazeta50e2332014-03-29 21:28:21 -07006100/* Upper GRO stack assumes network header starts at gro_offset=0
6101 * Drivers could call both napi_gro_frags() and napi_gro_receive()
6102 * We copy ethernet header into skb->data to have a common layout.
6103 */
Eric Dumazet4adb9c42012-05-18 20:49:06 +00006104static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08006105{
Herbert Xu76620aa2009-04-16 02:02:07 -07006106 struct sk_buff *skb = napi->skb;
Eric Dumazeta50e2332014-03-29 21:28:21 -07006107 const struct ethhdr *eth;
6108 unsigned int hlen = sizeof(*eth);
Herbert Xu76620aa2009-04-16 02:02:07 -07006109
6110 napi->skb = NULL;
6111
Eric Dumazeta50e2332014-03-29 21:28:21 -07006112 skb_reset_mac_header(skb);
6113 skb_gro_reset_offset(skb);
6114
Eric Dumazeta50e2332014-03-29 21:28:21 -07006115 if (unlikely(skb_gro_header_hard(skb, hlen))) {
6116 eth = skb_gro_header_slow(skb, hlen, 0);
6117 if (unlikely(!eth)) {
Aaron Conole4da46ce2016-04-02 15:26:43 -04006118 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
6119 __func__, napi->dev->name);
Eric Dumazeta50e2332014-03-29 21:28:21 -07006120 napi_reuse_skb(napi, skb);
6121 return NULL;
6122 }
6123 } else {
Eric Dumazeta4270d62019-05-29 15:36:10 -07006124 eth = (const struct ethhdr *)skb->data;
Eric Dumazeta50e2332014-03-29 21:28:21 -07006125 gro_pull_from_frag0(skb, hlen);
6126 NAPI_GRO_CB(skb)->frag0 += hlen;
6127 NAPI_GRO_CB(skb)->frag0_len -= hlen;
Herbert Xu76620aa2009-04-16 02:02:07 -07006128 }
Eric Dumazeta50e2332014-03-29 21:28:21 -07006129 __skb_pull(skb, hlen);
6130
6131 /*
6132 * This works because the only protocols we care about don't require
6133 * special handling.
6134 * We'll fix it up properly in napi_frags_finish()
6135 */
6136 skb->protocol = eth->h_proto;
Herbert Xu76620aa2009-04-16 02:02:07 -07006137
Herbert Xu76620aa2009-04-16 02:02:07 -07006138 return skb;
6139}
Herbert Xu76620aa2009-04-16 02:02:07 -07006140
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07006141gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07006142{
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05006143 gro_result_t ret;
Herbert Xu76620aa2009-04-16 02:02:07 -07006144 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08006145
6146 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07006147 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08006148
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00006149 trace_napi_gro_frags_entry(skb);
6150
Geneviève Bastienb0e3f1b2018-11-27 12:52:39 -05006151 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
6152 trace_napi_gro_frags_exit(ret);
6153
6154 return ret;
Herbert Xu5d38a072009-01-04 16:13:40 -08006155}
6156EXPORT_SYMBOL(napi_gro_frags);
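
/* Illustrative sketch: page-frag based GRO for drivers that never build a
 * linear copy of the frame.  The whole frame, Ethernet header included, is
 * attached as a page fragment; napi_frags_skb() above then pulls the header
 * out of frag0.  page, offset, len and truesize come from the hypothetical
 * RX descriptor.
 *
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		return;		// allocation failed, drop the frame
 *	skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 *	napi_gro_frags(napi);
 */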
6157
Tom Herbert573e8fc2014-08-22 13:33:47 -07006158/* Compute the checksum from gro_offset and return the folded value
6159 * after adding in any pseudo checksum.
6160 */
6161__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
6162{
6163 __wsum wsum;
6164 __sum16 sum;
6165
6166 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
6167
6168 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
6169 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
Cong Wang14641932018-11-26 09:31:26 -08006170 /* See comments in __skb_checksum_complete(). */
Tom Herbert573e8fc2014-08-22 13:33:47 -07006171 if (likely(!sum)) {
6172 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
6173 !skb->csum_complete_sw)
Cong Wang7fe50ac2018-11-12 14:47:18 -08006174 netdev_rx_csum_fault(skb->dev, skb);
Tom Herbert573e8fc2014-08-22 13:33:47 -07006175 }
6176
6177 NAPI_GRO_CB(skb)->csum = wsum;
6178 NAPI_GRO_CB(skb)->csum_valid = 1;
6179
6180 return sum;
6181}
6182EXPORT_SYMBOL(__skb_gro_checksum_complete);
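/*
 * Illustrative sketch: the folding performed by csum_fold() above reduces
 * the 32-bit ones' complement accumulator to the final 16-bit value and
 * complements it. A plain-C equivalent, for reference only (csum_fold()
 * itself is usually arch-specific):
 */
#if 0	/* example only */
static u16 example_csum_fold(u32 sum)
{
	/* add the two 16-bit halves; a second pass absorbs the carry */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (u16)~sum;	/* ones' complement of the folded sum */
}
#endif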
6183
ashwanth@codeaurora.org773fc8f2017-06-09 14:24:58 +05306184static void net_rps_send_ipi(struct softnet_data *remsd)
6185{
6186#ifdef CONFIG_RPS
6187 while (remsd) {
6188 struct softnet_data *next = remsd->rps_ipi_next;
6189
6190 if (cpu_online(remsd->cpu))
6191 smp_call_function_single_async(remsd->cpu, &remsd->csd);
6192 remsd = next;
6193 }
6194#endif
6195}
6196
Eric Dumazete326bed2010-04-22 00:22:45 -07006197/*
Zhi Yong Wu855abcf2014-01-01 04:34:50 +08006198 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
Eric Dumazete326bed2010-04-22 00:22:45 -07006199 * Note: called with local irq disabled, but exits with local irq enabled.
6200 */
6201static void net_rps_action_and_irq_enable(struct softnet_data *sd)
6202{
6203#ifdef CONFIG_RPS
6204 struct softnet_data *remsd = sd->rps_ipi_list;
6205
6206 if (remsd) {
6207 sd->rps_ipi_list = NULL;
6208
6209 local_irq_enable();
6210
 6211 /* Send pending IPIs to kick RPS processing on remote cpus. */
ashwanth@codeaurora.org773fc8f2017-06-09 14:24:58 +05306212 net_rps_send_ipi(remsd);
Eric Dumazete326bed2010-04-22 00:22:45 -07006213 } else
6214#endif
6215 local_irq_enable();
6216}
6217
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08006218static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
6219{
6220#ifdef CONFIG_RPS
6221 return sd->rps_ipi_list != NULL;
6222#else
6223 return false;
6224#endif
6225}
6226
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006227static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006228{
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07006229 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02006230 bool again = true;
6231 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006232
Eric Dumazete326bed2010-04-22 00:22:45 -07006233 /* Check if we have pending IPIs; it's better to send them now,
 6234 * rather than waiting for net_rx_action() to end.
6235 */
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08006236 if (sd_has_rps_ipi_waiting(sd)) {
Eric Dumazete326bed2010-04-22 00:22:45 -07006237 local_irq_disable();
6238 net_rps_action_and_irq_enable(sd);
6239 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08006240
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01006241 napi->weight = dev_rx_weight;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02006242 while (again) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006243 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006244
Changli Gao6e7676c2010-04-27 15:07:33 -07006245 while ((skb = __skb_dequeue(&sd->process_queue))) {
Julian Anastasov2c17d272015-07-09 09:59:10 +03006246 rcu_read_lock();
Changli Gao6e7676c2010-04-27 15:07:33 -07006247 __netif_receive_skb(skb);
Julian Anastasov2c17d272015-07-09 09:59:10 +03006248 rcu_read_unlock();
Tom Herbert76cc8b12010-05-20 18:37:59 +00006249 input_queue_head_incr(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02006250 if (++work >= quota)
Tom Herbert76cc8b12010-05-20 18:37:59 +00006251 return work;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02006252
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006253 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006254
Paolo Abeni145dd5f2016-08-25 15:58:44 +02006255 local_irq_disable();
Changli Gao6e7676c2010-04-27 15:07:33 -07006256 rps_lock(sd);
Tom Herbert11ef7a82014-06-30 09:50:40 -07006257 if (skb_queue_empty(&sd->input_pkt_queue)) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07006258 /*
6259 * Inline a custom version of __napi_complete().
 6260 * Only the current cpu owns and manipulates this napi,
Tom Herbert11ef7a82014-06-30 09:50:40 -07006261 * and NAPI_STATE_SCHED is the only possible flag set
6262 * on backlog.
6263 * We can use a plain write instead of clear_bit(),
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07006264 * and we don't need an smp_mb() memory barrier.
6265 */
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07006266 napi->state = 0;
Paolo Abeni145dd5f2016-08-25 15:58:44 +02006267 again = false;
6268 } else {
6269 skb_queue_splice_tail_init(&sd->input_pkt_queue,
6270 &sd->process_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07006271 }
6272 rps_unlock(sd);
Paolo Abeni145dd5f2016-08-25 15:58:44 +02006273 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07006274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006275
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006276 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006277}
6278
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006279/**
6280 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07006281 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006282 *
Eric Dumazetbc9ad162014-10-28 18:05:13 -07006283 * The entry's receive function will be scheduled to run.
6284 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006285 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08006286void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006287{
6288 unsigned long flags;
6289
6290 local_irq_save(flags);
Christoph Lameter903ceff2014-08-17 12:30:35 -05006291 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006292 local_irq_restore(flags);
6293}
6294EXPORT_SYMBOL(__napi_schedule);
6295
Eric Dumazetbc9ad162014-10-28 18:05:13 -07006296/**
Eric Dumazet39e6c822017-02-28 10:34:50 -08006297 * napi_schedule_prep - check if napi can be scheduled
6298 * @n: napi context
6299 *
6300 * Test if NAPI routine is already running, and if not mark
6301 * it as running. This is used as a condition variable
 6302 * it as running. This is used as a condition variable to
 6303 * ensure only one NAPI poll instance runs. We also make
6304 */
6305bool napi_schedule_prep(struct napi_struct *n)
6306{
6307 unsigned long val, new;
6308
6309 do {
6310 val = READ_ONCE(n->state);
6311 if (unlikely(val & NAPIF_STATE_DISABLE))
6312 return false;
6313 new = val | NAPIF_STATE_SCHED;
6314
 6315 /* Set the STATE_MISSED bit if STATE_SCHED was already set.
 6316 * This was suggested by Alexander Duyck, as the compiler
 6317 * emits better code than:
6318 * if (val & NAPIF_STATE_SCHED)
6319 * new |= NAPIF_STATE_MISSED;
6320 */
6321 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6322 NAPIF_STATE_MISSED;
6323 } while (cmpxchg(&n->state, val, new) != val);
6324
6325 return !(val & NAPIF_STATE_SCHED);
6326}
6327EXPORT_SYMBOL(napi_schedule_prep);
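/*
 * Illustrative sketch: the usual way a driver hard interrupt hands work to
 * NAPI with the two functions above. foo_priv and foo_disable_rx_irq() are
 * hypothetical driver-side names.
 */
#if 0	/* example only */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		/* We now own the NAPI instance: mask further RX interrupts
		 * and let the softirq poll loop take over.
		 */
		foo_disable_rx_irq(priv);
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif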
6328
6329/**
Eric Dumazetbc9ad162014-10-28 18:05:13 -07006330 * __napi_schedule_irqoff - schedule for receive
6331 * @n: entry to schedule
6332 *
6333 * Variant of __napi_schedule() assuming hard irqs are masked
6334 */
6335void __napi_schedule_irqoff(struct napi_struct *n)
6336{
6337 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6338}
6339EXPORT_SYMBOL(__napi_schedule_irqoff);
6340
Eric Dumazet364b6052016-11-15 10:15:13 -08006341bool napi_complete_done(struct napi_struct *n, int work_done)
Herbert Xud565b0a2008-12-15 23:38:52 -08006342{
Eric Dumazet6f8b12d2020-04-22 09:13:27 -07006343 unsigned long flags, val, new, timeout = 0;
6344 bool ret = true;
Herbert Xud565b0a2008-12-15 23:38:52 -08006345
6346 /*
Eric Dumazet217f6972016-11-15 10:15:11 -08006347 * 1) Don't let napi dequeue from the cpu poll list
 6348 * just in case it's running on a different cpu.
6349 * 2) If we are busy polling, do nothing here, we have
6350 * the guarantee we will be called later.
Herbert Xud565b0a2008-12-15 23:38:52 -08006351 */
Eric Dumazet217f6972016-11-15 10:15:11 -08006352 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6353 NAPIF_STATE_IN_BUSY_POLL)))
Eric Dumazet364b6052016-11-15 10:15:13 -08006354 return false;
Herbert Xud565b0a2008-12-15 23:38:52 -08006355
Eric Dumazet6f8b12d2020-04-22 09:13:27 -07006356 if (work_done) {
6357 if (n->gro_bitmask)
Eric Dumazet7e417a62020-04-22 09:13:28 -07006358 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6359 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
Eric Dumazet6f8b12d2020-04-22 09:13:27 -07006360 }
6361 if (n->defer_hard_irqs_count > 0) {
6362 n->defer_hard_irqs_count--;
Eric Dumazet7e417a62020-04-22 09:13:28 -07006363 timeout = READ_ONCE(n->dev->gro_flush_timeout);
Eric Dumazet6f8b12d2020-04-22 09:13:27 -07006364 if (timeout)
6365 ret = false;
6366 }
6367 if (n->gro_bitmask) {
Paolo Abeni605108a2018-11-21 18:21:35 +01006368 /* When the NAPI instance uses a timeout and keeps postponing
6369 * it, we need to bound somehow the time packets are kept in
6370 * the GRO layer
6371 */
6372 napi_gro_flush(n, !!timeout);
Eric Dumazet3b47d302014-11-06 21:09:44 -08006373 }
Maxim Mikityanskiyc8079432020-01-21 15:09:40 +00006374
6375 gro_normal_list(n);
6376
Eric Dumazet02c16022017-02-04 15:25:02 -08006377 if (unlikely(!list_empty(&n->poll_list))) {
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08006378 /* If n->poll_list is not empty, we need to mask irqs */
6379 local_irq_save(flags);
Eric Dumazet02c16022017-02-04 15:25:02 -08006380 list_del_init(&n->poll_list);
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08006381 local_irq_restore(flags);
6382 }
Eric Dumazet39e6c822017-02-28 10:34:50 -08006383
6384 do {
6385 val = READ_ONCE(n->state);
6386
6387 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6388
6389 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
6390
6391 /* If STATE_MISSED was set, leave STATE_SCHED set,
6392 * because we will call napi->poll() one more time.
6393 * This C code was suggested by Alexander Duyck to help gcc.
6394 */
6395 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6396 NAPIF_STATE_SCHED;
6397 } while (cmpxchg(&n->state, val, new) != val);
6398
6399 if (unlikely(val & NAPIF_STATE_MISSED)) {
6400 __napi_schedule(n);
6401 return false;
6402 }
6403
Eric Dumazet6f8b12d2020-04-22 09:13:27 -07006404 if (timeout)
6405 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6406 HRTIMER_MODE_REL_PINNED);
6407 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08006408}
Eric Dumazet3b47d302014-11-06 21:09:44 -08006409EXPORT_SYMBOL(napi_complete_done);
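/*
 * Illustrative sketch: a typical driver poll() method built around
 * napi_complete_done(). foo_clean_rx() and foo_enable_rx_irq() are
 * hypothetical driver helpers.
 */
#if 0	/* example only */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = foo_clean_rx(priv, budget);

	/* Re-arm interrupts only if the budget was not exhausted and
	 * napi_complete_done() really took us off the poll list; it may
	 * return false when irq deferral via gro_flush_timeout is active.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_enable_rx_irq(priv);

	return work_done;
}
#endif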
Herbert Xud565b0a2008-12-15 23:38:52 -08006410
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006411/* must be called under rcu_read_lock(), as we dont take a reference */
Eric Dumazet02d62e82015-11-18 06:30:52 -08006412static struct napi_struct *napi_by_id(unsigned int napi_id)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006413{
6414 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6415 struct napi_struct *napi;
6416
6417 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6418 if (napi->napi_id == napi_id)
6419 return napi;
6420
6421 return NULL;
6422}
Eric Dumazet02d62e82015-11-18 06:30:52 -08006423
6424#if defined(CONFIG_NET_RX_BUSY_POLL)
Eric Dumazet217f6972016-11-15 10:15:11 -08006425
Eric Dumazetce6aea92015-11-18 06:30:54 -08006426#define BUSY_POLL_BUDGET 8
Eric Dumazet217f6972016-11-15 10:15:11 -08006427
6428static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
6429{
6430 int rc;
6431
Eric Dumazet39e6c822017-02-28 10:34:50 -08006432 /* Busy polling means there is a high chance device driver hard irq
6433 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6434 * set in napi_schedule_prep().
6435 * Since we are about to call napi->poll() once more, we can safely
6436 * clear NAPI_STATE_MISSED.
6437 *
6438 * Note: x86 could use a single "lock and ..." instruction
6439 * to perform these two clear_bit()
6440 */
6441 clear_bit(NAPI_STATE_MISSED, &napi->state);
Eric Dumazet217f6972016-11-15 10:15:11 -08006442 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6443
6444 local_bh_disable();
6445
6446 /* All we really want here is to re-enable device interrupts.
6447 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6448 */
6449 rc = napi->poll(napi, BUSY_POLL_BUDGET);
Edward Cree323ebb62019-08-06 14:53:55 +01006450 /* We can't gro_normal_list() here, because napi->poll() might have
6451 * rearmed the napi (napi_complete_done()) in which case it could
6452 * already be running on another CPU.
6453 */
Jesper Dangaard Brouer1e223912017-08-25 15:04:32 +02006454 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
Eric Dumazet217f6972016-11-15 10:15:11 -08006455 netpoll_poll_unlock(have_poll_lock);
Edward Cree323ebb62019-08-06 14:53:55 +01006456 if (rc == BUSY_POLL_BUDGET) {
6457 /* As the whole budget was spent, we still own the napi so can
6458 * safely handle the rx_list.
6459 */
6460 gro_normal_list(napi);
Eric Dumazet217f6972016-11-15 10:15:11 -08006461 __napi_schedule(napi);
Edward Cree323ebb62019-08-06 14:53:55 +01006462 }
Eric Dumazet217f6972016-11-15 10:15:11 -08006463 local_bh_enable();
Eric Dumazet217f6972016-11-15 10:15:11 -08006464}
6465
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07006466void napi_busy_loop(unsigned int napi_id,
6467 bool (*loop_end)(void *, unsigned long),
6468 void *loop_end_arg)
Eric Dumazet02d62e82015-11-18 06:30:52 -08006469{
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07006470 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
Eric Dumazet217f6972016-11-15 10:15:11 -08006471 int (*napi_poll)(struct napi_struct *napi, int budget);
Eric Dumazet217f6972016-11-15 10:15:11 -08006472 void *have_poll_lock = NULL;
Eric Dumazet02d62e82015-11-18 06:30:52 -08006473 struct napi_struct *napi;
Eric Dumazet217f6972016-11-15 10:15:11 -08006474
6475restart:
Eric Dumazet217f6972016-11-15 10:15:11 -08006476 napi_poll = NULL;
Eric Dumazet02d62e82015-11-18 06:30:52 -08006477
Eric Dumazet2a028ec2015-11-18 06:30:53 -08006478 rcu_read_lock();
Eric Dumazet02d62e82015-11-18 06:30:52 -08006479
Alexander Duyck545cd5e2017-03-24 10:07:53 -07006480 napi = napi_by_id(napi_id);
Eric Dumazet02d62e82015-11-18 06:30:52 -08006481 if (!napi)
6482 goto out;
6483
Eric Dumazet217f6972016-11-15 10:15:11 -08006484 preempt_disable();
6485 for (;;) {
Alexander Duyck2b5cd0d2017-03-24 10:08:12 -07006486 int work = 0;
6487
Eric Dumazet2a028ec2015-11-18 06:30:53 -08006488 local_bh_disable();
Eric Dumazet217f6972016-11-15 10:15:11 -08006489 if (!napi_poll) {
6490 unsigned long val = READ_ONCE(napi->state);
6491
6492 /* If multiple threads are competing for this napi,
6493 * we avoid dirtying napi->state as much as we can.
6494 */
6495 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6496 NAPIF_STATE_IN_BUSY_POLL))
6497 goto count;
6498 if (cmpxchg(&napi->state, val,
6499 val | NAPIF_STATE_IN_BUSY_POLL |
6500 NAPIF_STATE_SCHED) != val)
6501 goto count;
6502 have_poll_lock = netpoll_poll_lock(napi);
6503 napi_poll = napi->poll;
6504 }
Alexander Duyck2b5cd0d2017-03-24 10:08:12 -07006505 work = napi_poll(napi, BUSY_POLL_BUDGET);
6506 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
Edward Cree323ebb62019-08-06 14:53:55 +01006507 gro_normal_list(napi);
Eric Dumazet217f6972016-11-15 10:15:11 -08006508count:
Alexander Duyck2b5cd0d2017-03-24 10:08:12 -07006509 if (work > 0)
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07006510 __NET_ADD_STATS(dev_net(napi->dev),
Alexander Duyck2b5cd0d2017-03-24 10:08:12 -07006511 LINUX_MIB_BUSYPOLLRXPACKETS, work);
Eric Dumazet2a028ec2015-11-18 06:30:53 -08006512 local_bh_enable();
Eric Dumazet02d62e82015-11-18 06:30:52 -08006513
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07006514 if (!loop_end || loop_end(loop_end_arg, start_time))
Eric Dumazet217f6972016-11-15 10:15:11 -08006515 break;
Eric Dumazet02d62e82015-11-18 06:30:52 -08006516
Eric Dumazet217f6972016-11-15 10:15:11 -08006517 if (unlikely(need_resched())) {
6518 if (napi_poll)
6519 busy_poll_stop(napi, have_poll_lock);
6520 preempt_enable();
6521 rcu_read_unlock();
6522 cond_resched();
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07006523 if (loop_end(loop_end_arg, start_time))
Alexander Duyck2b5cd0d2017-03-24 10:08:12 -07006524 return;
Eric Dumazet217f6972016-11-15 10:15:11 -08006525 goto restart;
6526 }
Linus Torvalds6cdf89b2016-12-12 10:48:02 -08006527 cpu_relax();
Eric Dumazet217f6972016-11-15 10:15:11 -08006528 }
6529 if (napi_poll)
6530 busy_poll_stop(napi, have_poll_lock);
6531 preempt_enable();
Eric Dumazet02d62e82015-11-18 06:30:52 -08006532out:
Eric Dumazet2a028ec2015-11-18 06:30:53 -08006533 rcu_read_unlock();
Eric Dumazet02d62e82015-11-18 06:30:52 -08006534}
Sridhar Samudrala7db6b042017-03-24 10:08:24 -07006535EXPORT_SYMBOL(napi_busy_loop);
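/*
 * Illustrative sketch: a loop_end callback for napi_busy_loop(). Here
 * polling stops once a hypothetical receive ring has something queued or
 * an external stop flag is raised; foo_ring, foo_ring_nonempty() and
 * foo_busy_arg are made-up names. start_time is the value captured by
 * napi_busy_loop() itself; a real user would typically compare it against
 * a microsecond budget, as the socket busy-poll code does.
 */
#if 0	/* example only */
struct foo_busy_arg {
	struct foo_ring *ring;
	bool stop;
};

static bool foo_busy_loop_end(void *arg, unsigned long start_time)
{
	struct foo_busy_arg *ba = arg;

	return foo_ring_nonempty(ba->ring) || READ_ONCE(ba->stop);
}

static void foo_wait_busy(unsigned int napi_id, struct foo_busy_arg *ba)
{
	napi_busy_loop(napi_id, foo_busy_loop_end, ba);
}
#endif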
Eric Dumazet02d62e82015-11-18 06:30:52 -08006536
6537#endif /* CONFIG_NET_RX_BUSY_POLL */
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006538
Eric Dumazet149d6ad2016-11-08 11:07:28 -08006539static void napi_hash_add(struct napi_struct *napi)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006540{
Eric Dumazetd64b5e82015-11-18 06:31:00 -08006541 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
6542 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
Eric Dumazet52bd2d62015-11-18 06:30:50 -08006543 return;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006544
Eric Dumazet52bd2d62015-11-18 06:30:50 -08006545 spin_lock(&napi_hash_lock);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006546
Alexander Duyck545cd5e2017-03-24 10:07:53 -07006547 /* 0..NR_CPUS range is reserved for sender_cpu use */
Eric Dumazet52bd2d62015-11-18 06:30:50 -08006548 do {
Alexander Duyck545cd5e2017-03-24 10:07:53 -07006549 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6550 napi_gen_id = MIN_NAPI_ID;
Eric Dumazet52bd2d62015-11-18 06:30:50 -08006551 } while (napi_by_id(napi_gen_id));
6552 napi->napi_id = napi_gen_id;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006553
Eric Dumazet52bd2d62015-11-18 06:30:50 -08006554 hlist_add_head_rcu(&napi->napi_hash_node,
6555 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006556
Eric Dumazet52bd2d62015-11-18 06:30:50 -08006557 spin_unlock(&napi_hash_lock);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006558}
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006559
 6560/* Warning: the caller is responsible for making sure an RCU grace period
 6561 * has elapsed before freeing the memory containing @napi
6562 */
Eric Dumazet34cbe272015-11-18 06:31:02 -08006563bool napi_hash_del(struct napi_struct *napi)
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006564{
Eric Dumazet34cbe272015-11-18 06:31:02 -08006565 bool rcu_sync_needed = false;
6566
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006567 spin_lock(&napi_hash_lock);
6568
Eric Dumazet34cbe272015-11-18 06:31:02 -08006569 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
6570 rcu_sync_needed = true;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006571 hlist_del_rcu(&napi->napi_hash_node);
Eric Dumazet34cbe272015-11-18 06:31:02 -08006572 }
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006573 spin_unlock(&napi_hash_lock);
Eric Dumazet34cbe272015-11-18 06:31:02 -08006574 return rcu_sync_needed;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03006575}
6576EXPORT_SYMBOL_GPL(napi_hash_del);
6577
Eric Dumazet3b47d302014-11-06 21:09:44 -08006578static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6579{
6580 struct napi_struct *napi;
6581
6582 napi = container_of(timer, struct napi_struct, timer);
Eric Dumazet39e6c822017-02-28 10:34:50 -08006583
6584 /* Note : we use a relaxed variant of napi_schedule_prep() not setting
6585 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6586 */
Eric Dumazet6f8b12d2020-04-22 09:13:27 -07006587 if (!napi_disable_pending(napi) &&
Eric Dumazet39e6c822017-02-28 10:34:50 -08006588 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
6589 __napi_schedule_irqoff(napi);
Eric Dumazet3b47d302014-11-06 21:09:44 -08006590
6591 return HRTIMER_NORESTART;
6592}
6593
David S. Miller7c4ec742018-07-20 23:37:55 -07006594static void init_gro_hash(struct napi_struct *napi)
Herbert Xud565b0a2008-12-15 23:38:52 -08006595{
David Miller07d78362018-06-24 14:14:02 +09006596 int i;
6597
Li RongQing6312fe72018-07-05 14:34:32 +08006598 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6599 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6600 napi->gro_hash[i].count = 0;
6601 }
David S. Miller7c4ec742018-07-20 23:37:55 -07006602 napi->gro_bitmask = 0;
6603}
6604
6605void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6606 int (*poll)(struct napi_struct *, int), int weight)
6607{
6608 INIT_LIST_HEAD(&napi->poll_list);
6609 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6610 napi->timer.function = napi_watchdog;
6611 init_gro_hash(napi);
Herbert Xu5d38a072009-01-04 16:13:40 -08006612 napi->skb = NULL;
Edward Cree323ebb62019-08-06 14:53:55 +01006613 INIT_LIST_HEAD(&napi->rx_list);
6614 napi->rx_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08006615 napi->poll = poll;
Eric Dumazet82dc3c63c2013-03-05 15:57:22 +00006616 if (weight > NAPI_POLL_WEIGHT)
Qian Caibf29e9e2018-12-01 21:11:19 -05006617 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6618 weight);
Herbert Xud565b0a2008-12-15 23:38:52 -08006619 napi->weight = weight;
6620 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08006621 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08006622#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08006623 napi->poll_owner = -1;
6624#endif
6625 set_bit(NAPI_STATE_SCHED, &napi->state);
Eric Dumazet93d05d42015-11-18 06:31:03 -08006626 napi_hash_add(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08006627}
6628EXPORT_SYMBOL(netif_napi_add);
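/*
 * Illustrative sketch: registering and tearing down a NAPI instance from a
 * hypothetical driver, using netif_napi_add() above together with the
 * usual napi_enable()/napi_disable() pair and netif_napi_del(). foo_poll()
 * is the poll method sketched earlier; foo_priv is hypothetical.
 */
#if 0	/* example only */
static int foo_setup(struct net_device *netdev, struct foo_priv *priv)
{
	netif_napi_add(netdev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);
	return 0;
}

static void foo_teardown(struct foo_priv *priv)
{
	napi_disable(&priv->napi);	/* waits for any in-flight poll */
	netif_napi_del(&priv->napi);	/* process context, may sleep */
}
#endif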
6629
Eric Dumazet3b47d302014-11-06 21:09:44 -08006630void napi_disable(struct napi_struct *n)
6631{
6632 might_sleep();
6633 set_bit(NAPI_STATE_DISABLE, &n->state);
6634
6635 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6636 msleep(1);
Neil Horman2d8bff1262015-09-23 14:57:58 -04006637 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6638 msleep(1);
Eric Dumazet3b47d302014-11-06 21:09:44 -08006639
6640 hrtimer_cancel(&n->timer);
6641
6642 clear_bit(NAPI_STATE_DISABLE, &n->state);
6643}
6644EXPORT_SYMBOL(napi_disable);
6645
David Miller07d78362018-06-24 14:14:02 +09006646static void flush_gro_hash(struct napi_struct *napi)
David Millerd4546c22018-06-24 14:13:49 +09006647{
David Miller07d78362018-06-24 14:14:02 +09006648 int i;
David Millerd4546c22018-06-24 14:13:49 +09006649
David Miller07d78362018-06-24 14:14:02 +09006650 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6651 struct sk_buff *skb, *n;
6652
Li RongQing6312fe72018-07-05 14:34:32 +08006653 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
David Miller07d78362018-06-24 14:14:02 +09006654 kfree_skb(skb);
Li RongQing6312fe72018-07-05 14:34:32 +08006655 napi->gro_hash[i].count = 0;
David Miller07d78362018-06-24 14:14:02 +09006656 }
David Millerd4546c22018-06-24 14:13:49 +09006657}
6658
Eric Dumazet93d05d42015-11-18 06:31:03 -08006659/* Must be called in process context */
Herbert Xud565b0a2008-12-15 23:38:52 -08006660void netif_napi_del(struct napi_struct *napi)
6661{
Eric Dumazet93d05d42015-11-18 06:31:03 -08006662 might_sleep();
6663 if (napi_hash_del(napi))
6664 synchronize_net();
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08006665 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07006666 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08006667
David Miller07d78362018-06-24 14:14:02 +09006668 flush_gro_hash(napi);
Li RongQingd9f37d02018-07-13 14:41:36 +08006669 napi->gro_bitmask = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08006670}
6671EXPORT_SYMBOL(netif_napi_del);
6672
Herbert Xu726ce702014-12-21 07:16:21 +11006673static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6674{
6675 void *have;
6676 int work, weight;
6677
6678 list_del_init(&n->poll_list);
6679
6680 have = netpoll_poll_lock(n);
6681
6682 weight = n->weight;
6683
6684 /* This NAPI_STATE_SCHED test is for avoiding a race
6685 * with netpoll's poll_napi(). Only the entity which
6686 * obtains the lock and sees NAPI_STATE_SCHED set will
6687 * actually make the ->poll() call. Therefore we avoid
6688 * accidentally calling ->poll() when NAPI is not scheduled.
6689 */
6690 work = 0;
6691 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6692 work = n->poll(n, weight);
Jesper Dangaard Brouer1db19db2016-07-07 18:01:32 +02006693 trace_napi_poll(n, work, weight);
Herbert Xu726ce702014-12-21 07:16:21 +11006694 }
6695
Eric Dumazet427d5832020-06-17 09:40:51 -07006696 if (unlikely(work > weight))
6697 pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6698 n->poll, work, weight);
Herbert Xu726ce702014-12-21 07:16:21 +11006699
6700 if (likely(work < weight))
6701 goto out_unlock;
6702
6703 /* Drivers must not modify the NAPI state if they
6704 * consume the entire weight. In such cases this code
6705 * still "owns" the NAPI instance and therefore can
6706 * move the instance around on the list at-will.
6707 */
6708 if (unlikely(napi_disable_pending(n))) {
6709 napi_complete(n);
6710 goto out_unlock;
6711 }
6712
Li RongQingd9f37d02018-07-13 14:41:36 +08006713 if (n->gro_bitmask) {
Herbert Xu726ce702014-12-21 07:16:21 +11006714 /* flush too old packets
6715 * If HZ < 1000, flush all packets.
6716 */
6717 napi_gro_flush(n, HZ >= 1000);
6718 }
6719
Maxim Mikityanskiyc8079432020-01-21 15:09:40 +00006720 gro_normal_list(n);
6721
Herbert Xu001ce542014-12-21 07:16:22 +11006722 /* Some drivers may have called napi_schedule
6723 * prior to exhausting their budget.
6724 */
6725 if (unlikely(!list_empty(&n->poll_list))) {
6726 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6727 n->dev ? n->dev->name : "backlog");
6728 goto out_unlock;
6729 }
6730
Herbert Xu726ce702014-12-21 07:16:21 +11006731 list_add_tail(&n->poll_list, repoll);
6732
6733out_unlock:
6734 netpoll_poll_unlock(have);
6735
6736 return work;
6737}
6738
Emese Revfy0766f782016-06-20 20:42:34 +02006739static __latent_entropy void net_rx_action(struct softirq_action *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006740{
Christoph Lameter903ceff2014-08-17 12:30:35 -05006741 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
Matthew Whitehead7acf8a12017-04-19 12:37:10 -04006742 unsigned long time_limit = jiffies +
6743 usecs_to_jiffies(netdev_budget_usecs);
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07006744 int budget = netdev_budget;
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08006745 LIST_HEAD(list);
6746 LIST_HEAD(repoll);
Matt Mackall53fb95d2005-08-11 19:27:43 -07006747
Linus Torvalds1da177e2005-04-16 15:20:36 -07006748 local_irq_disable();
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08006749 list_splice_init(&sd->poll_list, &list);
6750 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006751
Herbert Xuceb8d5b2014-12-21 07:16:25 +11006752 for (;;) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006753 struct napi_struct *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006754
Herbert Xuceb8d5b2014-12-21 07:16:25 +11006755 if (list_empty(&list)) {
6756 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
Eric Dumazetf52dffe2016-11-23 08:44:56 -08006757 goto out;
Herbert Xuceb8d5b2014-12-21 07:16:25 +11006758 break;
6759 }
6760
Herbert Xu6bd373e2014-12-21 07:16:24 +11006761 n = list_first_entry(&list, struct napi_struct, poll_list);
6762 budget -= napi_poll(n, &repoll);
6763
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08006764 /* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08006765 * Allow this to run for 2 jiffies, which allows
 6766 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006767 */
Herbert Xuceb8d5b2014-12-21 07:16:25 +11006768 if (unlikely(budget <= 0 ||
6769 time_after_eq(jiffies, time_limit))) {
6770 sd->time_squeeze++;
6771 break;
6772 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006773 }
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08006774
Eric Dumazetd75b1ad2014-11-02 06:19:33 -08006775 local_irq_disable();
6776
6777 list_splice_tail_init(&sd->poll_list, &list);
6778 list_splice_tail(&repoll, &list);
6779 list_splice(&list, &sd->poll_list);
6780 if (!list_empty(&sd->poll_list))
6781 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6782
Eric Dumazete326bed2010-04-22 00:22:45 -07006783 net_rps_action_and_irq_enable(sd);
Eric Dumazetf52dffe2016-11-23 08:44:56 -08006784out:
6785 __kfree_skb_flush();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006786}
6787
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02006788struct netdev_adjacent {
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006789 struct net_device *dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02006790
6791 /* upper master flag, there can only be one master device per list */
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006792 bool master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02006793
Taehee Yoo32b6d342019-10-21 18:47:56 +00006794 /* lookup ignore flag */
6795 bool ignore;
6796
Veaceslav Falico5d261912013-08-28 23:25:05 +02006797 /* counter for the number of times this device was added to us */
6798 u16 ref_nr;
6799
Veaceslav Falico402dae92013-09-25 09:20:09 +02006800 /* private field for the users */
6801 void *private;
6802
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006803 struct list_head list;
6804 struct rcu_head rcu;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006805};
6806
Michal Kubeček6ea29da2015-09-24 10:59:05 +02006807static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006808 struct list_head *adj_list)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006809{
Veaceslav Falico5d261912013-08-28 23:25:05 +02006810 struct netdev_adjacent *adj;
Veaceslav Falico5d261912013-08-28 23:25:05 +02006811
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006812 list_for_each_entry(adj, adj_list, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02006813 if (adj->dev == adj_dev)
6814 return adj;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006815 }
6816 return NULL;
6817}
6818
Taehee Yoo32b6d342019-10-21 18:47:56 +00006819static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
David Ahernf1170fd2016-10-17 19:15:51 -07006820{
6821 struct net_device *dev = data;
6822
6823 return upper_dev == dev;
6824}
6825
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006826/**
6827 * netdev_has_upper_dev - Check if device is linked to an upper device
6828 * @dev: device
6829 * @upper_dev: upper device to check
6830 *
6831 * Find out if a device is linked to specified upper device and return true
 6832 * in case it is. Note that this checks only the immediate upper device,
 6833 * not the complete stack of devices. The caller must hold the RTNL lock.
6834 */
6835bool netdev_has_upper_dev(struct net_device *dev,
6836 struct net_device *upper_dev)
6837{
6838 ASSERT_RTNL();
6839
Taehee Yoo32b6d342019-10-21 18:47:56 +00006840 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
David Ahernf1170fd2016-10-17 19:15:51 -07006841 upper_dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006842}
6843EXPORT_SYMBOL(netdev_has_upper_dev);
6844
6845/**
David Ahern1a3f0602016-10-17 19:15:44 -07006846 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6847 * @dev: device
6848 * @upper_dev: upper device to check
6849 *
6850 * Find out if a device is linked to specified upper device and return true
6851 * in case it is. Note that this checks the entire upper device chain.
 6852 * The caller must hold the RCU read lock.
6853 */
6854
David Ahern1a3f0602016-10-17 19:15:44 -07006855bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6856 struct net_device *upper_dev)
6857{
Taehee Yoo32b6d342019-10-21 18:47:56 +00006858 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
David Ahern1a3f0602016-10-17 19:15:44 -07006859 upper_dev);
6860}
6861EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
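/*
 * Illustrative sketch: a pre-link sanity check in the spirit of what
 * stacking drivers do, using the full-chain variant above. The function
 * name is hypothetical.
 */
#if 0	/* example only */
static bool foo_link_would_loop(struct net_device *lower,
				struct net_device *upper)
{
	bool loop;

	/* Linking @upper on top of @lower would loop if @lower is already
	 * stacked, directly or indirectly, above @upper.
	 */
	rcu_read_lock();
	loop = netdev_has_upper_dev_all_rcu(upper, lower);
	rcu_read_unlock();

	return loop;
}
#endif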
6862
6863/**
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006864 * netdev_has_any_upper_dev - Check if device is linked to some device
6865 * @dev: device
6866 *
6867 * Find out if a device is linked to an upper device and return true in case
6868 * it is. The caller must hold the RTNL lock.
6869 */
Ido Schimmel25cc72a2017-09-01 10:52:31 +02006870bool netdev_has_any_upper_dev(struct net_device *dev)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006871{
6872 ASSERT_RTNL();
6873
David Ahernf1170fd2016-10-17 19:15:51 -07006874 return !list_empty(&dev->adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006875}
Ido Schimmel25cc72a2017-09-01 10:52:31 +02006876EXPORT_SYMBOL(netdev_has_any_upper_dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006877
6878/**
6879 * netdev_master_upper_dev_get - Get master upper device
6880 * @dev: device
6881 *
6882 * Find a master upper device and return pointer to it or NULL in case
6883 * it's not there. The caller must hold the RTNL lock.
6884 */
6885struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6886{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02006887 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006888
6889 ASSERT_RTNL();
6890
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006891 if (list_empty(&dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006892 return NULL;
6893
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006894 upper = list_first_entry(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02006895 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00006896 if (likely(upper->master))
6897 return upper->dev;
6898 return NULL;
6899}
6900EXPORT_SYMBOL(netdev_master_upper_dev_get);
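/*
 * Illustrative sketch: the common "is this device enslaved to a bond or
 * bridge?" check built on netdev_master_upper_dev_get(). Requires RTNL,
 * as the function itself asserts. The wrapper name is hypothetical.
 */
#if 0	/* example only */
static bool foo_is_enslaved(struct net_device *dev)
{
	ASSERT_RTNL();

	return netdev_master_upper_dev_get(dev) != NULL;
}
#endif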
6901
Taehee Yoo32b6d342019-10-21 18:47:56 +00006902static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6903{
6904 struct netdev_adjacent *upper;
6905
6906 ASSERT_RTNL();
6907
6908 if (list_empty(&dev->adj_list.upper))
6909 return NULL;
6910
6911 upper = list_first_entry(&dev->adj_list.upper,
6912 struct netdev_adjacent, list);
6913 if (likely(upper->master) && !upper->ignore)
6914 return upper->dev;
6915 return NULL;
6916}
6917
David Ahern0f524a82016-10-17 19:15:52 -07006918/**
6919 * netdev_has_any_lower_dev - Check if device is linked to some device
6920 * @dev: device
6921 *
6922 * Find out if a device is linked to a lower device and return true in case
6923 * it is. The caller must hold the RTNL lock.
6924 */
6925static bool netdev_has_any_lower_dev(struct net_device *dev)
6926{
6927 ASSERT_RTNL();
6928
6929 return !list_empty(&dev->adj_list.lower);
6930}
6931
Veaceslav Falicob6ccba42013-09-25 09:20:23 +02006932void *netdev_adjacent_get_private(struct list_head *adj_list)
6933{
6934 struct netdev_adjacent *adj;
6935
6936 adj = list_entry(adj_list, struct netdev_adjacent, list);
6937
6938 return adj->private;
6939}
6940EXPORT_SYMBOL(netdev_adjacent_get_private);
6941
Veaceslav Falico31088a12013-09-25 09:20:12 +02006942/**
Vlad Yasevich44a40852014-05-16 17:20:38 -04006943 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6944 * @dev: device
6945 * @iter: list_head ** of the current position
6946 *
6947 * Gets the next device from the dev's upper list, starting from iter
6948 * position. The caller must hold RCU read lock.
6949 */
6950struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6951 struct list_head **iter)
6952{
6953 struct netdev_adjacent *upper;
6954
6955 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6956
6957 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6958
6959 if (&upper->list == &dev->adj_list.upper)
6960 return NULL;
6961
6962 *iter = &upper->list;
6963
6964 return upper->dev;
6965}
6966EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6967
Taehee Yoo32b6d342019-10-21 18:47:56 +00006968static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
6969 struct list_head **iter,
6970 bool *ignore)
Taehee Yoo5343da42019-10-21 18:47:50 +00006971{
6972 struct netdev_adjacent *upper;
6973
6974 upper = list_entry((*iter)->next, struct netdev_adjacent, list);
6975
6976 if (&upper->list == &dev->adj_list.upper)
6977 return NULL;
6978
6979 *iter = &upper->list;
Taehee Yoo32b6d342019-10-21 18:47:56 +00006980 *ignore = upper->ignore;
Taehee Yoo5343da42019-10-21 18:47:50 +00006981
6982 return upper->dev;
6983}
6984
David Ahern1a3f0602016-10-17 19:15:44 -07006985static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6986 struct list_head **iter)
6987{
6988 struct netdev_adjacent *upper;
6989
6990 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6991
6992 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6993
6994 if (&upper->list == &dev->adj_list.upper)
6995 return NULL;
6996
6997 *iter = &upper->list;
6998
6999 return upper->dev;
7000}
7001
Taehee Yoo32b6d342019-10-21 18:47:56 +00007002static int __netdev_walk_all_upper_dev(struct net_device *dev,
7003 int (*fn)(struct net_device *dev,
7004 void *data),
7005 void *data)
Taehee Yoo5343da42019-10-21 18:47:50 +00007006{
7007 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7008 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7009 int ret, cur = 0;
Taehee Yoo32b6d342019-10-21 18:47:56 +00007010 bool ignore;
Taehee Yoo5343da42019-10-21 18:47:50 +00007011
7012 now = dev;
7013 iter = &dev->adj_list.upper;
7014
7015 while (1) {
7016 if (now != dev) {
7017 ret = fn(now, data);
7018 if (ret)
7019 return ret;
7020 }
7021
7022 next = NULL;
7023 while (1) {
Taehee Yoo32b6d342019-10-21 18:47:56 +00007024 udev = __netdev_next_upper_dev(now, &iter, &ignore);
Taehee Yoo5343da42019-10-21 18:47:50 +00007025 if (!udev)
7026 break;
Taehee Yoo32b6d342019-10-21 18:47:56 +00007027 if (ignore)
7028 continue;
Taehee Yoo5343da42019-10-21 18:47:50 +00007029
7030 next = udev;
7031 niter = &udev->adj_list.upper;
7032 dev_stack[cur] = now;
7033 iter_stack[cur++] = iter;
7034 break;
7035 }
7036
7037 if (!next) {
7038 if (!cur)
7039 return 0;
7040 next = dev_stack[--cur];
7041 niter = iter_stack[cur];
7042 }
7043
7044 now = next;
7045 iter = niter;
7046 }
7047
7048 return 0;
7049}
7050
David Ahern1a3f0602016-10-17 19:15:44 -07007051int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7052 int (*fn)(struct net_device *dev,
7053 void *data),
7054 void *data)
7055{
Taehee Yoo5343da42019-10-21 18:47:50 +00007056 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7057 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7058 int ret, cur = 0;
David Ahern1a3f0602016-10-17 19:15:44 -07007059
Taehee Yoo5343da42019-10-21 18:47:50 +00007060 now = dev;
7061 iter = &dev->adj_list.upper;
David Ahern1a3f0602016-10-17 19:15:44 -07007062
Taehee Yoo5343da42019-10-21 18:47:50 +00007063 while (1) {
7064 if (now != dev) {
7065 ret = fn(now, data);
7066 if (ret)
7067 return ret;
7068 }
7069
7070 next = NULL;
7071 while (1) {
7072 udev = netdev_next_upper_dev_rcu(now, &iter);
7073 if (!udev)
7074 break;
7075
7076 next = udev;
7077 niter = &udev->adj_list.upper;
7078 dev_stack[cur] = now;
7079 iter_stack[cur++] = iter;
7080 break;
7081 }
7082
7083 if (!next) {
7084 if (!cur)
7085 return 0;
7086 next = dev_stack[--cur];
7087 niter = iter_stack[cur];
7088 }
7089
7090 now = next;
7091 iter = niter;
David Ahern1a3f0602016-10-17 19:15:44 -07007092 }
7093
7094 return 0;
7095}
7096EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
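/*
 * Illustrative sketch: using the walker with a callback, in the same style
 * as ____netdev_has_upper_dev() above. Here the callback simply counts
 * every device stacked on top of @dev; the foo_* names are hypothetical.
 */
#if 0	/* example only */
static int foo_count_one(struct net_device *upper, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;		/* a non-zero return would stop the walk */
}

static unsigned int foo_count_uppers(struct net_device *dev)
{
	unsigned int count = 0;

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, foo_count_one, &count);
	rcu_read_unlock();

	return count;
}
#endif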
7097
Taehee Yoo32b6d342019-10-21 18:47:56 +00007098static bool __netdev_has_upper_dev(struct net_device *dev,
7099 struct net_device *upper_dev)
7100{
7101 ASSERT_RTNL();
7102
7103 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7104 upper_dev);
7105}
7106
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007107/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02007108 * netdev_lower_get_next_private - Get the next ->private from the
7109 * lower neighbour list
7110 * @dev: device
7111 * @iter: list_head ** of the current position
7112 *
7113 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 7114 * list, starting from iter position. The caller must either hold the
7115 * RTNL lock or its own locking that guarantees that the neighbour lower
subashab@codeaurora.orgb4691392015-07-24 03:03:29 +00007116 * list will remain unchanged.
Veaceslav Falico31088a12013-09-25 09:20:12 +02007117 */
7118void *netdev_lower_get_next_private(struct net_device *dev,
7119 struct list_head **iter)
7120{
7121 struct netdev_adjacent *lower;
7122
7123 lower = list_entry(*iter, struct netdev_adjacent, list);
7124
7125 if (&lower->list == &dev->adj_list.lower)
7126 return NULL;
7127
Veaceslav Falico6859e7d2014-04-07 11:25:12 +02007128 *iter = lower->list.next;
Veaceslav Falico31088a12013-09-25 09:20:12 +02007129
7130 return lower->private;
7131}
7132EXPORT_SYMBOL(netdev_lower_get_next_private);
7133
7134/**
7135 * netdev_lower_get_next_private_rcu - Get the next ->private from the
7136 * lower neighbour list, RCU
7137 * variant
7138 * @dev: device
7139 * @iter: list_head ** of the current position
7140 *
7141 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7142 * list, starting from iter position. The caller must hold RCU read lock.
7143 */
7144void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7145 struct list_head **iter)
7146{
7147 struct netdev_adjacent *lower;
7148
7149 WARN_ON_ONCE(!rcu_read_lock_held());
7150
7151 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7152
7153 if (&lower->list == &dev->adj_list.lower)
7154 return NULL;
7155
Veaceslav Falico6859e7d2014-04-07 11:25:12 +02007156 *iter = &lower->list;
Veaceslav Falico31088a12013-09-25 09:20:12 +02007157
7158 return lower->private;
7159}
7160EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7161
7162/**
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04007163 * netdev_lower_get_next - Get the next device from the lower neighbour
7164 * list
7165 * @dev: device
7166 * @iter: list_head ** of the current position
7167 *
7168 * Gets the next netdev_adjacent from the dev's lower neighbour
7169 * list, starting from iter position. The caller must hold RTNL lock or
7170 * its own locking that guarantees that the neighbour lower
subashab@codeaurora.orgb4691392015-07-24 03:03:29 +00007171 * list will remain unchanged.
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04007172 */
7173void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7174{
7175 struct netdev_adjacent *lower;
7176
Nikolay Aleksandrovcfdd28b2016-02-17 18:00:31 +01007177 lower = list_entry(*iter, struct netdev_adjacent, list);
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04007178
7179 if (&lower->list == &dev->adj_list.lower)
7180 return NULL;
7181
Nikolay Aleksandrovcfdd28b2016-02-17 18:00:31 +01007182 *iter = lower->list.next;
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04007183
7184 return lower->dev;
7185}
7186EXPORT_SYMBOL(netdev_lower_get_next);
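/*
 * Illustrative sketch: open-coded iteration with netdev_lower_get_next(),
 * equivalent to what the netdev_for_each_lower_dev() helper in
 * netdevice.h expands to. The function name is hypothetical.
 */
#if 0	/* example only */
static void foo_walk_direct_lowers(struct net_device *dev)
{
	struct list_head *iter = &dev->adj_list.lower;
	struct net_device *ldev;

	ASSERT_RTNL();
	while ((ldev = netdev_lower_get_next(dev, &iter)) != NULL)
		netdev_info(dev, "lower: %s\n", ldev->name);
}
#endif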
7187
David Ahern1a3f0602016-10-17 19:15:44 -07007188static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7189 struct list_head **iter)
7190{
7191 struct netdev_adjacent *lower;
7192
David Ahern46b5ab12016-10-26 13:21:33 -07007193 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
David Ahern1a3f0602016-10-17 19:15:44 -07007194
7195 if (&lower->list == &dev->adj_list.lower)
7196 return NULL;
7197
David Ahern46b5ab12016-10-26 13:21:33 -07007198 *iter = &lower->list;
David Ahern1a3f0602016-10-17 19:15:44 -07007199
7200 return lower->dev;
7201}
7202
Taehee Yoo32b6d342019-10-21 18:47:56 +00007203static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7204 struct list_head **iter,
7205 bool *ignore)
7206{
7207 struct netdev_adjacent *lower;
7208
7209 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7210
7211 if (&lower->list == &dev->adj_list.lower)
7212 return NULL;
7213
7214 *iter = &lower->list;
7215 *ignore = lower->ignore;
7216
7217 return lower->dev;
7218}
7219
David Ahern1a3f0602016-10-17 19:15:44 -07007220int netdev_walk_all_lower_dev(struct net_device *dev,
7221 int (*fn)(struct net_device *dev,
7222 void *data),
7223 void *data)
7224{
Taehee Yoo5343da42019-10-21 18:47:50 +00007225 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7226 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7227 int ret, cur = 0;
David Ahern1a3f0602016-10-17 19:15:44 -07007228
Taehee Yoo5343da42019-10-21 18:47:50 +00007229 now = dev;
7230 iter = &dev->adj_list.lower;
David Ahern1a3f0602016-10-17 19:15:44 -07007231
Taehee Yoo5343da42019-10-21 18:47:50 +00007232 while (1) {
7233 if (now != dev) {
7234 ret = fn(now, data);
7235 if (ret)
7236 return ret;
7237 }
7238
7239 next = NULL;
7240 while (1) {
7241 ldev = netdev_next_lower_dev(now, &iter);
7242 if (!ldev)
7243 break;
7244
7245 next = ldev;
7246 niter = &ldev->adj_list.lower;
7247 dev_stack[cur] = now;
7248 iter_stack[cur++] = iter;
7249 break;
7250 }
7251
7252 if (!next) {
7253 if (!cur)
7254 return 0;
7255 next = dev_stack[--cur];
7256 niter = iter_stack[cur];
7257 }
7258
7259 now = next;
7260 iter = niter;
David Ahern1a3f0602016-10-17 19:15:44 -07007261 }
7262
7263 return 0;
7264}
7265EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
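/*
 * Illustrative sketch: a lower-device walk that aborts early by returning
 * non-zero from the callback, here to learn whether any device below @dev
 * is currently running. Runs under RTNL, as the non-RCU walker expects;
 * the foo_* names are hypothetical.
 */
#if 0	/* example only */
static int foo_lower_is_up(struct net_device *lower, void *data)
{
	return netif_running(lower);	/* non-zero stops the walk */
}

static bool foo_any_lower_running(struct net_device *dev)
{
	ASSERT_RTNL();

	return netdev_walk_all_lower_dev(dev, foo_lower_is_up, NULL);
}
#endif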
7266
Taehee Yoo32b6d342019-10-21 18:47:56 +00007267static int __netdev_walk_all_lower_dev(struct net_device *dev,
7268 int (*fn)(struct net_device *dev,
7269 void *data),
7270 void *data)
7271{
7272 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7273 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7274 int ret, cur = 0;
7275 bool ignore;
7276
7277 now = dev;
7278 iter = &dev->adj_list.lower;
7279
7280 while (1) {
7281 if (now != dev) {
7282 ret = fn(now, data);
7283 if (ret)
7284 return ret;
7285 }
7286
7287 next = NULL;
7288 while (1) {
7289 ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7290 if (!ldev)
7291 break;
7292 if (ignore)
7293 continue;
7294
7295 next = ldev;
7296 niter = &ldev->adj_list.lower;
7297 dev_stack[cur] = now;
7298 iter_stack[cur++] = iter;
7299 break;
7300 }
7301
7302 if (!next) {
7303 if (!cur)
7304 return 0;
7305 next = dev_stack[--cur];
7306 niter = iter_stack[cur];
7307 }
7308
7309 now = next;
7310 iter = niter;
7311 }
7312
7313 return 0;
7314}
7315
Taehee Yoo7151aff2020-02-15 10:50:21 +00007316struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7317 struct list_head **iter)
David Ahern1a3f0602016-10-17 19:15:44 -07007318{
7319 struct netdev_adjacent *lower;
7320
7321 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7322 if (&lower->list == &dev->adj_list.lower)
7323 return NULL;
7324
7325 *iter = &lower->list;
7326
7327 return lower->dev;
7328}
Taehee Yoo7151aff2020-02-15 10:50:21 +00007329EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
David Ahern1a3f0602016-10-17 19:15:44 -07007330
Taehee Yoo5343da42019-10-21 18:47:50 +00007331static u8 __netdev_upper_depth(struct net_device *dev)
7332{
7333 struct net_device *udev;
7334 struct list_head *iter;
7335 u8 max_depth = 0;
Taehee Yoo32b6d342019-10-21 18:47:56 +00007336 bool ignore;
Taehee Yoo5343da42019-10-21 18:47:50 +00007337
7338 for (iter = &dev->adj_list.upper,
Taehee Yoo32b6d342019-10-21 18:47:56 +00007339 udev = __netdev_next_upper_dev(dev, &iter, &ignore);
Taehee Yoo5343da42019-10-21 18:47:50 +00007340 udev;
Taehee Yoo32b6d342019-10-21 18:47:56 +00007341 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7342 if (ignore)
7343 continue;
Taehee Yoo5343da42019-10-21 18:47:50 +00007344 if (max_depth < udev->upper_level)
7345 max_depth = udev->upper_level;
7346 }
7347
7348 return max_depth;
7349}
7350
7351static u8 __netdev_lower_depth(struct net_device *dev)
7352{
7353 struct net_device *ldev;
7354 struct list_head *iter;
7355 u8 max_depth = 0;
Taehee Yoo32b6d342019-10-21 18:47:56 +00007356 bool ignore;
Taehee Yoo5343da42019-10-21 18:47:50 +00007357
7358 for (iter = &dev->adj_list.lower,
Taehee Yoo32b6d342019-10-21 18:47:56 +00007359 ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
Taehee Yoo5343da42019-10-21 18:47:50 +00007360 ldev;
Taehee Yoo32b6d342019-10-21 18:47:56 +00007361 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7362 if (ignore)
7363 continue;
Taehee Yoo5343da42019-10-21 18:47:50 +00007364 if (max_depth < ldev->lower_level)
7365 max_depth = ldev->lower_level;
7366 }
7367
7368 return max_depth;
7369}
7370
7371static int __netdev_update_upper_level(struct net_device *dev, void *data)
7372{
7373 dev->upper_level = __netdev_upper_depth(dev) + 1;
7374 return 0;
7375}
7376
7377static int __netdev_update_lower_level(struct net_device *dev, void *data)
7378{
7379 dev->lower_level = __netdev_lower_depth(dev) + 1;
7380 return 0;
7381}
7382
David Ahern1a3f0602016-10-17 19:15:44 -07007383int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7384 int (*fn)(struct net_device *dev,
7385 void *data),
7386 void *data)
7387{
Taehee Yoo5343da42019-10-21 18:47:50 +00007388 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7389 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7390 int ret, cur = 0;
David Ahern1a3f0602016-10-17 19:15:44 -07007391
Taehee Yoo5343da42019-10-21 18:47:50 +00007392 now = dev;
7393 iter = &dev->adj_list.lower;
David Ahern1a3f0602016-10-17 19:15:44 -07007394
Taehee Yoo5343da42019-10-21 18:47:50 +00007395 while (1) {
7396 if (now != dev) {
7397 ret = fn(now, data);
7398 if (ret)
7399 return ret;
7400 }
7401
7402 next = NULL;
7403 while (1) {
7404 ldev = netdev_next_lower_dev_rcu(now, &iter);
7405 if (!ldev)
7406 break;
7407
7408 next = ldev;
7409 niter = &ldev->adj_list.lower;
7410 dev_stack[cur] = now;
7411 iter_stack[cur++] = iter;
7412 break;
7413 }
7414
7415 if (!next) {
7416 if (!cur)
7417 return 0;
7418 next = dev_stack[--cur];
7419 niter = iter_stack[cur];
7420 }
7421
7422 now = next;
7423 iter = niter;
David Ahern1a3f0602016-10-17 19:15:44 -07007424 }
7425
7426 return 0;
7427}
7428EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7429
Jiri Pirko7ce856a2016-07-04 08:23:12 +02007430/**
dingtianhonge001bfa2013-12-13 10:19:55 +08007431 * netdev_lower_get_first_private_rcu - Get the first ->private from the
7432 * lower neighbour list, RCU
7433 * variant
7434 * @dev: device
7435 *
7436 * Gets the first netdev_adjacent->private from the dev's lower neighbour
7437 * list. The caller must hold RCU read lock.
7438 */
7439void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7440{
7441 struct netdev_adjacent *lower;
7442
7443 lower = list_first_or_null_rcu(&dev->adj_list.lower,
7444 struct netdev_adjacent, list);
7445 if (lower)
7446 return lower->private;
7447 return NULL;
7448}
7449EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7450
7451/**
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007452 * netdev_master_upper_dev_get_rcu - Get master upper device
7453 * @dev: device
7454 *
7455 * Find a master upper device and return pointer to it or NULL in case
7456 * it's not there. The caller must hold the RCU read lock.
7457 */
7458struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7459{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02007460 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007461
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007462 upper = list_first_or_null_rcu(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02007463 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007464 if (upper && likely(upper->master))
7465 return upper->dev;
7466 return NULL;
7467}
7468EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7469
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05307470static int netdev_adjacent_sysfs_add(struct net_device *dev,
Veaceslav Falico3ee32702014-01-14 21:58:50 +01007471 struct net_device *adj_dev,
7472 struct list_head *dev_list)
7473{
7474 char linkname[IFNAMSIZ+7];
tchardingf4563a72017-02-09 17:56:07 +11007475
Veaceslav Falico3ee32702014-01-14 21:58:50 +01007476 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7477 "upper_%s" : "lower_%s", adj_dev->name);
7478 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7479 linkname);
7480}
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05307481static void netdev_adjacent_sysfs_del(struct net_device *dev,
Veaceslav Falico3ee32702014-01-14 21:58:50 +01007482 char *name,
7483 struct list_head *dev_list)
7484{
7485 char linkname[IFNAMSIZ+7];
tchardingf4563a72017-02-09 17:56:07 +11007486
Veaceslav Falico3ee32702014-01-14 21:58:50 +01007487 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7488 "upper_%s" : "lower_%s", name);
7489 sysfs_remove_link(&(dev->dev.kobj), linkname);
7490}
7491
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04007492static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7493 struct net_device *adj_dev,
7494 struct list_head *dev_list)
7495{
7496 return (dev_list == &dev->adj_list.upper ||
7497 dev_list == &dev->adj_list.lower) &&
7498 net_eq(dev_net(dev), dev_net(adj_dev));
7499}
Veaceslav Falico3ee32702014-01-14 21:58:50 +01007500
Veaceslav Falico5d261912013-08-28 23:25:05 +02007501static int __netdev_adjacent_dev_insert(struct net_device *dev,
7502 struct net_device *adj_dev,
Veaceslav Falico7863c052013-09-25 09:20:06 +02007503 struct list_head *dev_list,
Veaceslav Falico402dae92013-09-25 09:20:09 +02007504 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02007505{
7506 struct netdev_adjacent *adj;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02007507 int ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02007508
Michal Kubeček6ea29da2015-09-24 10:59:05 +02007509 adj = __netdev_find_adj(adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007510
7511 if (adj) {
David Ahern790510d2016-10-17 19:15:43 -07007512 adj->ref_nr += 1;
David Ahern67b62f92016-10-17 19:15:53 -07007513 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7514 dev->name, adj_dev->name, adj->ref_nr);
7515
Veaceslav Falico5d261912013-08-28 23:25:05 +02007516 return 0;
7517 }
7518
7519 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7520 if (!adj)
7521 return -ENOMEM;
7522
7523 adj->dev = adj_dev;
7524 adj->master = master;
David Ahern790510d2016-10-17 19:15:43 -07007525 adj->ref_nr = 1;
Veaceslav Falico402dae92013-09-25 09:20:09 +02007526 adj->private = private;
Taehee Yoo32b6d342019-10-21 18:47:56 +00007527 adj->ignore = false;
Veaceslav Falico5d261912013-08-28 23:25:05 +02007528 dev_hold(adj_dev);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007529
David Ahern67b62f92016-10-17 19:15:53 -07007530 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7531 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007532
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04007533 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
Veaceslav Falico3ee32702014-01-14 21:58:50 +01007534 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
Veaceslav Falico5831d662013-09-25 09:20:32 +02007535 if (ret)
7536 goto free_adj;
7537 }
7538
Veaceslav Falico7863c052013-09-25 09:20:06 +02007539 /* Ensure that master link is always the first item in list. */
Veaceslav Falico842d67a2013-09-25 09:20:31 +02007540 if (master) {
7541 ret = sysfs_create_link(&(dev->dev.kobj),
7542 &(adj_dev->dev.kobj), "master");
7543 if (ret)
Veaceslav Falico5831d662013-09-25 09:20:32 +02007544 goto remove_symlinks;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02007545
Veaceslav Falico7863c052013-09-25 09:20:06 +02007546 list_add_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02007547 } else {
Veaceslav Falico7863c052013-09-25 09:20:06 +02007548 list_add_tail_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02007549 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02007550
7551 return 0;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02007552
Veaceslav Falico5831d662013-09-25 09:20:32 +02007553remove_symlinks:
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04007554 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
Veaceslav Falico3ee32702014-01-14 21:58:50 +01007555 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02007556free_adj:
7557 kfree(adj);
Nikolay Aleksandrov974daef2013-10-23 15:28:56 +02007558 dev_put(adj_dev);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02007559
7560 return ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02007561}
7562
stephen hemminger1d143d92013-12-29 14:01:29 -08007563static void __netdev_adjacent_dev_remove(struct net_device *dev,
7564 struct net_device *adj_dev,
Andrew Collins93409032016-10-03 13:43:02 -06007565 u16 ref_nr,
stephen hemminger1d143d92013-12-29 14:01:29 -08007566 struct list_head *dev_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02007567{
7568 struct netdev_adjacent *adj;
7569
David Ahern67b62f92016-10-17 19:15:53 -07007570 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7571 dev->name, adj_dev->name, ref_nr);
7572
Michal Kubeček6ea29da2015-09-24 10:59:05 +02007573 adj = __netdev_find_adj(adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007574
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007575 if (!adj) {
David Ahern67b62f92016-10-17 19:15:53 -07007576 pr_err("Adjacency does not exist for device %s from %s\n",
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007577 dev->name, adj_dev->name);
David Ahern67b62f92016-10-17 19:15:53 -07007578 WARN_ON(1);
7579 return;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007580 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02007581
Andrew Collins93409032016-10-03 13:43:02 -06007582 if (adj->ref_nr > ref_nr) {
David Ahern67b62f92016-10-17 19:15:53 -07007583 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7584 dev->name, adj_dev->name, ref_nr,
7585 adj->ref_nr - ref_nr);
Andrew Collins93409032016-10-03 13:43:02 -06007586 adj->ref_nr -= ref_nr;
Veaceslav Falico5d261912013-08-28 23:25:05 +02007587 return;
7588 }
7589
Veaceslav Falico842d67a2013-09-25 09:20:31 +02007590 if (adj->master)
7591 sysfs_remove_link(&(dev->dev.kobj), "master");
7592
Alexander Y. Fomichev7ce64c72014-09-15 14:22:35 +04007593 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
Veaceslav Falico3ee32702014-01-14 21:58:50 +01007594 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
Veaceslav Falico5831d662013-09-25 09:20:32 +02007595
Veaceslav Falico5d261912013-08-28 23:25:05 +02007596 list_del_rcu(&adj->list);
David Ahern67b62f92016-10-17 19:15:53 -07007597 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007598 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007599 dev_put(adj_dev);
7600 kfree_rcu(adj, rcu);
7601}
7602
stephen hemminger1d143d92013-12-29 14:01:29 -08007603static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7604 struct net_device *upper_dev,
7605 struct list_head *up_list,
7606 struct list_head *down_list,
7607 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02007608{
7609 int ret;
7610
David Ahern790510d2016-10-17 19:15:43 -07007611 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
Andrew Collins93409032016-10-03 13:43:02 -06007612 private, master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007613 if (ret)
7614 return ret;
7615
David Ahern790510d2016-10-17 19:15:43 -07007616 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
Andrew Collins93409032016-10-03 13:43:02 -06007617 private, false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007618 if (ret) {
David Ahern790510d2016-10-17 19:15:43 -07007619 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007620 return ret;
7621 }
7622
7623 return 0;
7624}
7625
stephen hemminger1d143d92013-12-29 14:01:29 -08007626static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7627 struct net_device *upper_dev,
Andrew Collins93409032016-10-03 13:43:02 -06007628 u16 ref_nr,
stephen hemminger1d143d92013-12-29 14:01:29 -08007629 struct list_head *up_list,
7630 struct list_head *down_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02007631{
Andrew Collins93409032016-10-03 13:43:02 -06007632 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7633 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007634}
7635
stephen hemminger1d143d92013-12-29 14:01:29 -08007636static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7637 struct net_device *upper_dev,
7638 void *private, bool master)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007639{
David Ahernf1170fd2016-10-17 19:15:51 -07007640 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7641 &dev->adj_list.upper,
7642 &upper_dev->adj_list.lower,
7643 private, master);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007644}
7645
stephen hemminger1d143d92013-12-29 14:01:29 -08007646static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7647 struct net_device *upper_dev)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007648{
Andrew Collins93409032016-10-03 13:43:02 -06007649 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007650 &dev->adj_list.upper,
7651 &upper_dev->adj_list.lower);
7652}
Veaceslav Falico5d261912013-08-28 23:25:05 +02007653
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007654static int __netdev_upper_dev_link(struct net_device *dev,
Veaceslav Falico402dae92013-09-25 09:20:09 +02007655 struct net_device *upper_dev, bool master,
David Ahern42ab19e2017-10-04 17:48:47 -07007656 void *upper_priv, void *upper_info,
7657 struct netlink_ext_ack *extack)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007658{
David Ahern51d0c0472017-10-04 17:48:45 -07007659 struct netdev_notifier_changeupper_info changeupper_info = {
7660 .info = {
7661 .dev = dev,
David Ahern42ab19e2017-10-04 17:48:47 -07007662 .extack = extack,
David Ahern51d0c0472017-10-04 17:48:45 -07007663 },
7664 .upper_dev = upper_dev,
7665 .master = master,
7666 .linking = true,
7667 .upper_info = upper_info,
7668 };
Mike Manning50d629e2018-02-26 23:49:30 +00007669 struct net_device *master_dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02007670 int ret = 0;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007671
7672 ASSERT_RTNL();
7673
7674 if (dev == upper_dev)
7675 return -EBUSY;
7676
7677 /* To prevent loops, check that dev is not an upper device of upper_dev. */
Taehee Yoo32b6d342019-10-21 18:47:56 +00007678 if (__netdev_has_upper_dev(upper_dev, dev))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007679 return -EBUSY;
7680
Taehee Yoo5343da42019-10-21 18:47:50 +00007681 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7682 return -EMLINK;
7683
Mike Manning50d629e2018-02-26 23:49:30 +00007684 if (!master) {
Taehee Yoo32b6d342019-10-21 18:47:56 +00007685 if (__netdev_has_upper_dev(dev, upper_dev))
Mike Manning50d629e2018-02-26 23:49:30 +00007686 return -EEXIST;
7687 } else {
Taehee Yoo32b6d342019-10-21 18:47:56 +00007688 master_dev = __netdev_master_upper_dev_get(dev);
Mike Manning50d629e2018-02-26 23:49:30 +00007689 if (master_dev)
7690 return master_dev == upper_dev ? -EEXIST : -EBUSY;
7691 }
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007692
David Ahern51d0c0472017-10-04 17:48:45 -07007693 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
Jiri Pirko573c7ba2015-10-16 14:01:22 +02007694 &changeupper_info.info);
7695 ret = notifier_to_errno(ret);
7696 if (ret)
7697 return ret;
7698
Jiri Pirko6dffb042015-12-03 12:12:10 +01007699 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
Veaceslav Falico402dae92013-09-25 09:20:09 +02007700 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007701 if (ret)
7702 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007703
David Ahern51d0c0472017-10-04 17:48:45 -07007704 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
Ido Schimmelb03804e2015-12-03 12:12:03 +01007705 &changeupper_info.info);
7706 ret = notifier_to_errno(ret);
7707 if (ret)
David Ahernf1170fd2016-10-17 19:15:51 -07007708 goto rollback;
Ido Schimmelb03804e2015-12-03 12:12:03 +01007709
Taehee Yoo5343da42019-10-21 18:47:50 +00007710 __netdev_update_upper_level(dev, NULL);
Taehee Yoo32b6d342019-10-21 18:47:56 +00007711 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
Taehee Yoo5343da42019-10-21 18:47:50 +00007712
7713 __netdev_update_lower_level(upper_dev, NULL);
Taehee Yoo32b6d342019-10-21 18:47:56 +00007714 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7715 NULL);
Taehee Yoo5343da42019-10-21 18:47:50 +00007716
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007717 return 0;
Veaceslav Falico5d261912013-08-28 23:25:05 +02007718
David Ahernf1170fd2016-10-17 19:15:51 -07007719rollback:
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007720 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007721
7722 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007723}
7724
7725/**
7726 * netdev_upper_dev_link - Add a link to the upper device
7727 * @dev: device
7728 * @upper_dev: new upper device
Florian Fainelli7a006d52018-01-22 19:14:28 -08007729 * @extack: netlink extended ack
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007730 *
7731 * Adds a link to a device which is upper to this one. The caller must hold
7732 * the RTNL lock. On a failure a negative errno code is returned.
7733 * On success the reference counts are adjusted and the function
7734 * returns zero.
7735 */
7736int netdev_upper_dev_link(struct net_device *dev,
David Ahern42ab19e2017-10-04 17:48:47 -07007737 struct net_device *upper_dev,
7738 struct netlink_ext_ack *extack)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007739{
David Ahern42ab19e2017-10-04 17:48:47 -07007740 return __netdev_upper_dev_link(dev, upper_dev, false,
7741 NULL, NULL, extack);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007742}
7743EXPORT_SYMBOL(netdev_upper_dev_link);
7744
7745/**
7746 * netdev_master_upper_dev_link - Add a master link to the upper device
7747 * @dev: device
7748 * @upper_dev: new upper device
Jiri Pirko6dffb042015-12-03 12:12:10 +01007749 * @upper_priv: upper device private
Jiri Pirko29bf24a2015-12-03 12:12:11 +01007750 * @upper_info: upper info to be passed down via notifier
Florian Fainelli7a006d52018-01-22 19:14:28 -08007751 * @extack: netlink extended ack
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007752 *
7753 * Adds a link to a device which is upper to this one. In this case, only
7754 * one master upper device can be linked, although other non-master devices
7755 * might be linked as well. The caller must hold the RTNL lock.
7756 * On a failure a negative errno code is returned. On success the reference
7757 * counts are adjusted and the function returns zero.
7758 */
7759int netdev_master_upper_dev_link(struct net_device *dev,
Jiri Pirko6dffb042015-12-03 12:12:10 +01007760 struct net_device *upper_dev,
David Ahern42ab19e2017-10-04 17:48:47 -07007761 void *upper_priv, void *upper_info,
7762 struct netlink_ext_ack *extack)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007763{
Jiri Pirko29bf24a2015-12-03 12:12:11 +01007764 return __netdev_upper_dev_link(dev, upper_dev, true,
David Ahern42ab19e2017-10-04 17:48:47 -07007765 upper_priv, upper_info, extack);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007766}
7767EXPORT_SYMBOL(netdev_master_upper_dev_link);
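
/*
 * Illustrative sketch, not part of the original file: how a bonding-style
 * master driver might enslave a port with the helper above. foo_enslave()
 * is a hypothetical name; only the netdev_* calls are real APIs.
 */
static int foo_enslave(struct net_device *master_dev,
                       struct net_device *slave_dev,
                       struct netlink_ext_ack *extack)
{
        ASSERT_RTNL();

        /* slave_dev becomes a lower device of master_dev; the two NULLs
         * are the optional upper_priv / upper_info arguments.
         */
        return netdev_master_upper_dev_link(slave_dev, master_dev,
                                            NULL, NULL, extack);
}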
7768
7769/**
7770 * netdev_upper_dev_unlink - Removes a link to upper device
7771 * @dev: device
7772 * @upper_dev: upper device to be removed
7773 *
7774 * Removes a link to a device which is upper to this one. The caller must hold
7775 * the RTNL lock.
7776 */
7777void netdev_upper_dev_unlink(struct net_device *dev,
7778 struct net_device *upper_dev)
7779{
David Ahern51d0c0472017-10-04 17:48:45 -07007780 struct netdev_notifier_changeupper_info changeupper_info = {
7781 .info = {
7782 .dev = dev,
7783 },
7784 .upper_dev = upper_dev,
7785 .linking = false,
7786 };
tchardingf4563a72017-02-09 17:56:07 +11007787
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007788 ASSERT_RTNL();
7789
Jiri Pirko0e4ead92015-08-27 09:31:18 +02007790 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
Jiri Pirko0e4ead92015-08-27 09:31:18 +02007791
David Ahern51d0c0472017-10-04 17:48:45 -07007792 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
Jiri Pirko573c7ba2015-10-16 14:01:22 +02007793 &changeupper_info.info);
7794
Veaceslav Falico2f268f12013-09-25 09:20:07 +02007795 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02007796
David Ahern51d0c0472017-10-04 17:48:45 -07007797 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
Jiri Pirko0e4ead92015-08-27 09:31:18 +02007798 &changeupper_info.info);
Taehee Yoo5343da42019-10-21 18:47:50 +00007799
7800 __netdev_update_upper_level(dev, NULL);
Taehee Yoo32b6d342019-10-21 18:47:56 +00007801 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
Taehee Yoo5343da42019-10-21 18:47:50 +00007802
7803 __netdev_update_lower_level(upper_dev, NULL);
Taehee Yoo32b6d342019-10-21 18:47:56 +00007804 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7805 NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00007806}
7807EXPORT_SYMBOL(netdev_upper_dev_unlink);
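
/*
 * Illustrative sketch, not part of the original file: undoing the link
 * set up in the enslave example above. foo_release() is hypothetical;
 * the unlink must run under RTNL and uses the same (lower, upper)
 * ordering as the link call.
 */
static void foo_release(struct net_device *master_dev,
                        struct net_device *slave_dev)
{
        ASSERT_RTNL();

        netdev_upper_dev_unlink(slave_dev, master_dev);
}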
7808
Taehee Yoo32b6d342019-10-21 18:47:56 +00007809static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
7810 struct net_device *lower_dev,
7811 bool val)
7812{
7813 struct netdev_adjacent *adj;
7814
7815 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7816 if (adj)
7817 adj->ignore = val;
7818
7819 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7820 if (adj)
7821 adj->ignore = val;
7822}
7823
7824static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
7825 struct net_device *lower_dev)
7826{
7827 __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7828}
7829
7830static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
7831 struct net_device *lower_dev)
7832{
7833 __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7834}
7835
7836int netdev_adjacent_change_prepare(struct net_device *old_dev,
7837 struct net_device *new_dev,
7838 struct net_device *dev,
7839 struct netlink_ext_ack *extack)
7840{
7841 int err;
7842
7843 if (!new_dev)
7844 return 0;
7845
7846 if (old_dev && new_dev != old_dev)
7847 netdev_adjacent_dev_disable(dev, old_dev);
7848
7849 err = netdev_upper_dev_link(new_dev, dev, extack);
7850 if (err) {
7851 if (old_dev && new_dev != old_dev)
7852 netdev_adjacent_dev_enable(dev, old_dev);
7853 return err;
7854 }
7855
7856 return 0;
7857}
7858EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7859
7860void netdev_adjacent_change_commit(struct net_device *old_dev,
7861 struct net_device *new_dev,
7862 struct net_device *dev)
7863{
7864 if (!new_dev || !old_dev)
7865 return;
7866
7867 if (new_dev == old_dev)
7868 return;
7869
7870 netdev_adjacent_dev_enable(dev, old_dev);
7871 netdev_upper_dev_unlink(old_dev, dev);
7872}
7873EXPORT_SYMBOL(netdev_adjacent_change_commit);
7874
7875void netdev_adjacent_change_abort(struct net_device *old_dev,
7876 struct net_device *new_dev,
7877 struct net_device *dev)
7878{
7879 if (!new_dev)
7880 return;
7881
7882 if (old_dev && new_dev != old_dev)
7883 netdev_adjacent_dev_enable(dev, old_dev);
7884
7885 netdev_upper_dev_unlink(new_dev, dev);
7886}
7887EXPORT_SYMBOL(netdev_adjacent_change_abort);
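
/*
 * Illustrative sketch, not part of the original file: the intended
 * prepare/commit/abort sequence when a device swaps one related device
 * for another. foo_swap_peer() and foo_switch_datapath() are
 * hypothetical driver steps.
 */
static int foo_switch_datapath(struct net_device *dev,
                               struct net_device *new_peer)
{
        return 0;       /* stand-in for real driver-specific work */
}

static int foo_swap_peer(struct net_device *dev,
                         struct net_device *old_peer,
                         struct net_device *new_peer,
                         struct netlink_ext_ack *extack)
{
        int err;

        err = netdev_adjacent_change_prepare(old_peer, new_peer, dev, extack);
        if (err)
                return err;

        err = foo_switch_datapath(dev, new_peer);
        if (err) {
                /* roll back: re-enable old_peer, unlink new_peer */
                netdev_adjacent_change_abort(old_peer, new_peer, dev);
                return err;
        }

        /* finalize: unlink old_peer now that new_peer has taken over */
        netdev_adjacent_change_commit(old_peer, new_peer, dev);
        return 0;
}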
7888
Moni Shoua61bd3852015-02-03 16:48:29 +02007889/**
7890 * netdev_bonding_info_change - Dispatch event about slave change
7891 * @dev: device
Masanari Iida4a26e4532015-02-14 22:26:34 +09007892 * @bonding_info: info to dispatch
Moni Shoua61bd3852015-02-03 16:48:29 +02007893 *
7894 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7895 * The caller must hold the RTNL lock.
7896 */
7897void netdev_bonding_info_change(struct net_device *dev,
7898 struct netdev_bonding_info *bonding_info)
7899{
David Ahern51d0c0472017-10-04 17:48:45 -07007900 struct netdev_notifier_bonding_info info = {
7901 .info.dev = dev,
7902 };
Moni Shoua61bd3852015-02-03 16:48:29 +02007903
7904 memcpy(&info.bonding_info, bonding_info,
7905 sizeof(struct netdev_bonding_info));
David Ahern51d0c0472017-10-04 17:48:45 -07007906 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
Moni Shoua61bd3852015-02-03 16:48:29 +02007907 &info.info);
7908}
7909EXPORT_SYMBOL(netdev_bonding_info_change);
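
/*
 * Illustrative sketch, not part of the original file: dispatching bonding
 * info from a hypothetical driver. The struct is zeroed for brevity; a
 * real caller such as the bonding driver fills in the slave/master
 * members before notifying.
 */
static void foo_notify_bonding_info(struct net_device *slave_dev)
{
        struct netdev_bonding_info binfo;

        memset(&binfo, 0, sizeof(binfo));
        /* ... populate binfo from the driver's own state here ... */

        ASSERT_RTNL();
        netdev_bonding_info_change(slave_dev, &binfo);
}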
7910
Maor Gottliebcff9f122020-04-30 22:21:31 +03007911/**
7912 * netdev_get_xmit_slave - Get the xmit slave of master device
Andrew Lunn88425002020-07-13 01:14:59 +02007913 * @dev: device
Maor Gottliebcff9f122020-04-30 22:21:31 +03007914 * @skb: The packet
7915 * @all_slaves: assume all the slaves are active
7916 *
7917 * The reference counters are not incremented so the caller must be
7918 * careful with locks. The caller must hold the RCU read lock.
7919 * %NULL is returned if no slave is found.
7920 */
7921
7922struct net_device *netdev_get_xmit_slave(struct net_device *dev,
7923 struct sk_buff *skb,
7924 bool all_slaves)
7925{
7926 const struct net_device_ops *ops = dev->netdev_ops;
7927
7928 if (!ops->ndo_get_xmit_slave)
7929 return NULL;
7930 return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
7931}
7932EXPORT_SYMBOL(netdev_get_xmit_slave);
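
/*
 * Illustrative sketch, not part of the original file: resolving which
 * slave would transmit a given skb, under the RCU read lock as the
 * kernel-doc above requires. foo_log_xmit_slave() is hypothetical.
 */
static void foo_log_xmit_slave(struct net_device *bond_dev,
                               struct sk_buff *skb)
{
        struct net_device *slave;

        rcu_read_lock();
        slave = netdev_get_xmit_slave(bond_dev, skb, false);
        if (slave)
                netdev_dbg(bond_dev, "skb would egress via %s\n", slave->name);
        /* no reference was taken, so 'slave' is only valid in this section */
        rcu_read_unlock();
}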
7933
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08007934static void netdev_adjacent_add_links(struct net_device *dev)
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007935{
7936 struct netdev_adjacent *iter;
7937
7938 struct net *net = dev_net(dev);
7939
7940 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08007941 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007942 continue;
7943 netdev_adjacent_sysfs_add(iter->dev, dev,
7944 &iter->dev->adj_list.lower);
7945 netdev_adjacent_sysfs_add(dev, iter->dev,
7946 &dev->adj_list.upper);
7947 }
7948
7949 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08007950 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007951 continue;
7952 netdev_adjacent_sysfs_add(iter->dev, dev,
7953 &iter->dev->adj_list.upper);
7954 netdev_adjacent_sysfs_add(dev, iter->dev,
7955 &dev->adj_list.lower);
7956 }
7957}
7958
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08007959static void netdev_adjacent_del_links(struct net_device *dev)
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007960{
7961 struct netdev_adjacent *iter;
7962
7963 struct net *net = dev_net(dev);
7964
7965 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08007966 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007967 continue;
7968 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7969 &iter->dev->adj_list.lower);
7970 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7971 &dev->adj_list.upper);
7972 }
7973
7974 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08007975 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007976 continue;
7977 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7978 &iter->dev->adj_list.upper);
7979 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7980 &dev->adj_list.lower);
7981 }
7982}
7983
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01007984void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
Veaceslav Falico402dae92013-09-25 09:20:09 +02007985{
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01007986 struct netdev_adjacent *iter;
Veaceslav Falico402dae92013-09-25 09:20:09 +02007987
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007988 struct net *net = dev_net(dev);
7989
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01007990 list_for_each_entry(iter, &dev->adj_list.upper, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08007991 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04007992 continue;
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01007993 netdev_adjacent_sysfs_del(iter->dev, oldname,
7994 &iter->dev->adj_list.lower);
7995 netdev_adjacent_sysfs_add(iter->dev, dev,
7996 &iter->dev->adj_list.lower);
7997 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02007998
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01007999 list_for_each_entry(iter, &dev->adj_list.lower, list) {
Wei Tangbe4da0e2016-06-16 21:30:12 +08008000 if (!net_eq(net, dev_net(iter->dev)))
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +04008001 continue;
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01008002 netdev_adjacent_sysfs_del(iter->dev, oldname,
8003 &iter->dev->adj_list.upper);
8004 netdev_adjacent_sysfs_add(iter->dev, dev,
8005 &iter->dev->adj_list.upper);
8006 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02008007}
Veaceslav Falico402dae92013-09-25 09:20:09 +02008008
8009void *netdev_lower_dev_get_private(struct net_device *dev,
8010 struct net_device *lower_dev)
8011{
8012 struct netdev_adjacent *lower;
8013
8014 if (!lower_dev)
8015 return NULL;
Michal Kubeček6ea29da2015-09-24 10:59:05 +02008016 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
Veaceslav Falico402dae92013-09-25 09:20:09 +02008017 if (!lower)
8018 return NULL;
8019
8020 return lower->private;
8021}
8022EXPORT_SYMBOL(netdev_lower_dev_get_private);
8023
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04008024
Jiri Pirko04d48262015-12-03 12:12:15 +01008025/**
8026 * netdev_lower_state_changed - Dispatch event about lower device state change
8027 * @lower_dev: device
8028 * @lower_state_info: state to dispatch
8029 *
8030 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8031 * The caller must hold the RTNL lock.
8032 */
8033void netdev_lower_state_changed(struct net_device *lower_dev,
8034 void *lower_state_info)
8035{
David Ahern51d0c0472017-10-04 17:48:45 -07008036 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8037 .info.dev = lower_dev,
8038 };
Jiri Pirko04d48262015-12-03 12:12:15 +01008039
8040 ASSERT_RTNL();
8041 changelowerstate_info.lower_state_info = lower_state_info;
David Ahern51d0c0472017-10-04 17:48:45 -07008042 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
Jiri Pirko04d48262015-12-03 12:12:15 +01008043 &changelowerstate_info.info);
8044}
8045EXPORT_SYMBOL(netdev_lower_state_changed);
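
/*
 * Illustrative sketch, not part of the original file: a LAG-style master
 * reporting per-port state to interested listeners. It assumes the
 * netdev_lag_lower_state_info layout from <linux/netdevice.h>;
 * foo_port_state_update() is hypothetical.
 */
static void foo_port_state_update(struct net_device *port_dev,
                                  bool link_up, bool tx_enabled)
{
        struct netdev_lag_lower_state_info info = {
                .link_up    = link_up,
                .tx_enabled = tx_enabled,
        };

        ASSERT_RTNL();
        netdev_lower_state_changed(port_dev, &info);
}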
8046
Patrick McHardyb6c40d62008-10-07 15:26:48 -07008047static void dev_change_rx_flags(struct net_device *dev, int flags)
8048{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08008049 const struct net_device_ops *ops = dev->netdev_ops;
8050
Vlad Yasevichd2615bf2013-11-19 20:47:15 -05008051 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08008052 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07008053}
8054
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008055static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07008056{
Eric Dumazetb536db92011-11-30 21:42:26 +00008057 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06008058 kuid_t uid;
8059 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07008060
Patrick McHardy24023452007-07-14 18:51:31 -07008061 ASSERT_RTNL();
8062
Wang Chendad9b332008-06-18 01:48:28 -07008063 dev->flags |= IFF_PROMISC;
8064 dev->promiscuity += inc;
8065 if (dev->promiscuity == 0) {
8066 /*
8067 * Avoid overflow.
8068 * If inc causes overflow, untouch promisc and return error.
8069 */
8070 if (inc < 0)
8071 dev->flags &= ~IFF_PROMISC;
8072 else {
8073 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008074 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
8075 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07008076 return -EOVERFLOW;
8077 }
8078 }
Patrick McHardy4417da62007-06-27 01:28:10 -07008079 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008080 pr_info("device %s %s promiscuous mode\n",
8081 dev->name,
8082 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11008083 if (audit_enabled) {
8084 current_uid_gid(&uid, &gid);
Richard Guy Briggscdfb6b32018-05-12 21:58:20 -04008085 audit_log(audit_context(), GFP_ATOMIC,
8086 AUDIT_ANOM_PROMISCUOUS,
8087 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8088 dev->name, (dev->flags & IFF_PROMISC),
8089 (old_flags & IFF_PROMISC),
8090 from_kuid(&init_user_ns, audit_get_loginuid(current)),
8091 from_kuid(&init_user_ns, uid),
8092 from_kgid(&init_user_ns, gid),
8093 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11008094 }
Patrick McHardy24023452007-07-14 18:51:31 -07008095
Patrick McHardyb6c40d62008-10-07 15:26:48 -07008096 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07008097 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008098 if (notify)
8099 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07008100 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07008101}
8102
Linus Torvalds1da177e2005-04-16 15:20:36 -07008103/**
8104 * dev_set_promiscuity - update promiscuity count on a device
8105 * @dev: device
8106 * @inc: modifier
8107 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07008108 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07008109 * remains above zero the interface remains promiscuous. Once it hits zero
8110 * the device reverts to normal filtering operation. A negative @inc
8111 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07008112 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008113 */
Wang Chendad9b332008-06-18 01:48:28 -07008114int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008115{
Eric Dumazetb536db92011-11-30 21:42:26 +00008116 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07008117 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008118
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008119 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07008120 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07008121 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07008122 if (dev->flags != old_flags)
8123 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07008124 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008125}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008126EXPORT_SYMBOL(dev_set_promiscuity);
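
/*
 * Illustrative sketch, not part of the original file: the balanced
 * +1/-1 use of the promiscuity counter around a capture session.
 * foo_* names are hypothetical; both calls need RTNL held.
 */
static int foo_capture_start(struct net_device *dev)
{
        return dev_set_promiscuity(dev, 1);
}

static void foo_capture_stop(struct net_device *dev)
{
        dev_set_promiscuity(dev, -1);
}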
Linus Torvalds1da177e2005-04-16 15:20:36 -07008127
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008128static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008129{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008130 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008131
Patrick McHardy24023452007-07-14 18:51:31 -07008132 ASSERT_RTNL();
8133
Linus Torvalds1da177e2005-04-16 15:20:36 -07008134 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07008135 dev->allmulti += inc;
8136 if (dev->allmulti == 0) {
8137 /*
8138 * Avoid overflow.
8139 * If inc causes overflow, untouch allmulti and return error.
8140 */
8141 if (inc < 0)
8142 dev->flags &= ~IFF_ALLMULTI;
8143 else {
8144 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008145 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
8146 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07008147 return -EOVERFLOW;
8148 }
8149 }
Patrick McHardy24023452007-07-14 18:51:31 -07008150 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07008151 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07008152 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008153 if (notify)
8154 __dev_notify_flags(dev, old_flags,
8155 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07008156 }
Wang Chendad9b332008-06-18 01:48:28 -07008157 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07008158}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008159
8160/**
8161 * dev_set_allmulti - update allmulti count on a device
8162 * @dev: device
8163 * @inc: modifier
8164 *
8165 * Add or remove reception of all multicast frames on a device. While the
8166 * count in the device remains above zero the interface remains listening
8167 * to all multicast frames. Once it hits zero the device reverts to normal
8168 * filtering operation. A negative @inc value is used to drop the counter
8169 * when releasing a resource needing all multicasts.
8170 * Return 0 if successful or a negative errno code on error.
8171 */
8172
8173int dev_set_allmulti(struct net_device *dev, int inc)
8174{
8175 return __dev_set_allmulti(dev, inc, true);
8176}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008177EXPORT_SYMBOL(dev_set_allmulti);
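
/*
 * Illustrative sketch, not part of the original file: taking the
 * all-multicast count and unwinding it if a later setup step fails,
 * so the counter stays balanced. foo_* names are hypothetical.
 */
static int foo_listener_setup(struct net_device *dev)
{
        return 0;       /* stand-in for driver-specific work */
}

static int foo_listener_init(struct net_device *dev)
{
        int err;

        err = dev_set_allmulti(dev, 1);
        if (err)
                return err;

        err = foo_listener_setup(dev);
        if (err)
                dev_set_allmulti(dev, -1);

        return err;
}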
Patrick McHardy4417da62007-06-27 01:28:10 -07008178
8179/*
8180 * Upload unicast and multicast address lists to device and
8181 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08008182 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07008183 * are present.
8184 */
8185void __dev_set_rx_mode(struct net_device *dev)
8186{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08008187 const struct net_device_ops *ops = dev->netdev_ops;
8188
Patrick McHardy4417da62007-06-27 01:28:10 -07008189 /* dev_open will call this function so the list will stay sane. */
8190 if (!(dev->flags&IFF_UP))
8191 return;
8192
8193 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09008194 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07008195
Jiri Pirko01789342011-08-16 06:29:00 +00008196 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07008197 /* Unicast addresses changes may only happen under the rtnl,
8198 * therefore calling __dev_set_promiscuity here is safe.
8199 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08008200 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008201 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07008202 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08008203 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008204 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07008205 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07008206 }
Patrick McHardy4417da62007-06-27 01:28:10 -07008207 }
Jiri Pirko01789342011-08-16 06:29:00 +00008208
8209 if (ops->ndo_set_rx_mode)
8210 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07008211}
8212
8213void dev_set_rx_mode(struct net_device *dev)
8214{
David S. Millerb9e40852008-07-15 00:15:08 -07008215 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07008216 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07008217 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008218}
8219
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008220/**
8221 * dev_get_flags - get flags reported to userspace
8222 * @dev: device
8223 *
8224 * Get the combination of flag bits exported through APIs to userspace.
8225 */
Eric Dumazet95c96172012-04-15 05:58:06 +00008226unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008227{
Eric Dumazet95c96172012-04-15 05:58:06 +00008228 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008229
8230 flags = (dev->flags & ~(IFF_PROMISC |
8231 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08008232 IFF_RUNNING |
8233 IFF_LOWER_UP |
8234 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07008235 (dev->gflags & (IFF_PROMISC |
8236 IFF_ALLMULTI));
8237
Stefan Rompfb00055a2006-03-20 17:09:11 -08008238 if (netif_running(dev)) {
8239 if (netif_oper_up(dev))
8240 flags |= IFF_RUNNING;
8241 if (netif_carrier_ok(dev))
8242 flags |= IFF_LOWER_UP;
8243 if (netif_dormant(dev))
8244 flags |= IFF_DORMANT;
8245 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008246
8247 return flags;
8248}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008249EXPORT_SYMBOL(dev_get_flags);
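
/*
 * Illustrative sketch, not part of the original file: testing the
 * synthesized IFF_RUNNING/IFF_LOWER_UP bits in the userspace-visible
 * flag word returned above. foo_link_usable() is hypothetical.
 */
static bool foo_link_usable(const struct net_device *dev)
{
        unsigned int flags = dev_get_flags(dev);
        unsigned int want = IFF_UP | IFF_RUNNING | IFF_LOWER_UP;

        return (flags & want) == want;
}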
Linus Torvalds1da177e2005-04-16 15:20:36 -07008250
Petr Machata6d040322018-12-06 17:05:43 +00008251int __dev_change_flags(struct net_device *dev, unsigned int flags,
8252 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008253{
Eric Dumazetb536db92011-11-30 21:42:26 +00008254 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00008255 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008256
Patrick McHardy24023452007-07-14 18:51:31 -07008257 ASSERT_RTNL();
8258
Linus Torvalds1da177e2005-04-16 15:20:36 -07008259 /*
8260 * Set the flags on our device.
8261 */
8262
8263 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8264 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8265 IFF_AUTOMEDIA)) |
8266 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8267 IFF_ALLMULTI));
8268
8269 /*
8270 * Load in the correct multicast list now the flags have changed.
8271 */
8272
Patrick McHardyb6c40d62008-10-07 15:26:48 -07008273 if ((old_flags ^ flags) & IFF_MULTICAST)
8274 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07008275
Patrick McHardy4417da62007-06-27 01:28:10 -07008276 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008277
8278 /*
8279 * Have we downed the interface? We handle IFF_UP ourselves
8280 * according to user attempts to set it, rather than blindly
8281 * setting it.
8282 */
8283
8284 ret = 0;
stephen hemminger7051b882017-07-18 15:59:27 -07008285 if ((old_flags ^ flags) & IFF_UP) {
8286 if (old_flags & IFF_UP)
8287 __dev_close(dev);
8288 else
Petr Machata40c900a2018-12-06 17:05:47 +00008289 ret = __dev_open(dev, extack);
stephen hemminger7051b882017-07-18 15:59:27 -07008290 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008291
Linus Torvalds1da177e2005-04-16 15:20:36 -07008292 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008293 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008294 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008295
Linus Torvalds1da177e2005-04-16 15:20:36 -07008296 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008297
8298 if (__dev_set_promiscuity(dev, inc, false) >= 0)
8299 if (dev->flags != old_flags)
8300 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008301 }
8302
8303 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
tchardingeb13da12017-02-09 17:56:06 +11008304 * is important. Some (broken) drivers set IFF_PROMISC when
8305 * IFF_ALLMULTI is requested, without asking us and without reporting it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008306 */
8307 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008308 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8309
Linus Torvalds1da177e2005-04-16 15:20:36 -07008310 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008311 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008312 }
8313
Patrick McHardybd380812010-02-26 06:34:53 +00008314 return ret;
8315}
8316
Nicolas Dichtela528c212013-09-25 12:02:44 +02008317void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8318 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00008319{
8320 unsigned int changes = dev->flags ^ old_flags;
8321
Nicolas Dichtela528c212013-09-25 12:02:44 +02008322 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07008323 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02008324
Patrick McHardybd380812010-02-26 06:34:53 +00008325 if (changes & IFF_UP) {
8326 if (dev->flags & IFF_UP)
8327 call_netdevice_notifiers(NETDEV_UP, dev);
8328 else
8329 call_netdevice_notifiers(NETDEV_DOWN, dev);
8330 }
8331
8332 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00008333 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
David Ahern51d0c0472017-10-04 17:48:45 -07008334 struct netdev_notifier_change_info change_info = {
8335 .info = {
8336 .dev = dev,
8337 },
8338 .flags_changed = changes,
8339 };
Jiri Pirkobe9efd32013-05-28 01:30:22 +00008340
David Ahern51d0c0472017-10-04 17:48:45 -07008341 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
Jiri Pirkobe9efd32013-05-28 01:30:22 +00008342 }
Patrick McHardybd380812010-02-26 06:34:53 +00008343}
8344
8345/**
8346 * dev_change_flags - change device settings
8347 * @dev: device
8348 * @flags: device state flags
Petr Machata567c5e12018-12-06 17:05:42 +00008349 * @extack: netlink extended ack
Patrick McHardybd380812010-02-26 06:34:53 +00008350 *
8351 * Change settings on a device based on the supplied state flags. The flags are
8352 * in the userspace exported format.
8353 */
Petr Machata567c5e12018-12-06 17:05:42 +00008354int dev_change_flags(struct net_device *dev, unsigned int flags,
8355 struct netlink_ext_ack *extack)
Patrick McHardybd380812010-02-26 06:34:53 +00008356{
Eric Dumazetb536db92011-11-30 21:42:26 +00008357 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008358 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00008359
Petr Machata6d040322018-12-06 17:05:43 +00008360 ret = __dev_change_flags(dev, flags, extack);
Patrick McHardybd380812010-02-26 06:34:53 +00008361 if (ret < 0)
8362 return ret;
8363
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02008364 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02008365 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008366 return ret;
8367}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008368EXPORT_SYMBOL(dev_change_flags);
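
/*
 * Illustrative sketch, not part of the original file: bringing an
 * interface administratively up from kernel code by setting IFF_UP in
 * the userspace-format flags. foo_bring_up() is hypothetical.
 */
static int foo_bring_up(struct net_device *dev,
                        struct netlink_ext_ack *extack)
{
        int err;

        rtnl_lock();
        err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP, extack);
        rtnl_unlock();

        return err;
}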
Linus Torvalds1da177e2005-04-16 15:20:36 -07008369
WANG Congf51048c2017-07-06 15:01:57 -07008370int __dev_set_mtu(struct net_device *dev, int new_mtu)
Veaceslav Falico2315dc92014-01-10 16:56:25 +01008371{
8372 const struct net_device_ops *ops = dev->netdev_ops;
8373
8374 if (ops->ndo_change_mtu)
8375 return ops->ndo_change_mtu(dev, new_mtu);
8376
Eric Dumazet501a90c2019-12-05 20:43:46 -08008377 /* Pairs with all the lockless reads of dev->mtu in the stack */
8378 WRITE_ONCE(dev->mtu, new_mtu);
Veaceslav Falico2315dc92014-01-10 16:56:25 +01008379 return 0;
8380}
WANG Congf51048c2017-07-06 15:01:57 -07008381EXPORT_SYMBOL(__dev_set_mtu);
Veaceslav Falico2315dc92014-01-10 16:56:25 +01008382
Eric Dumazetd836f5c2020-01-21 22:47:29 -08008383int dev_validate_mtu(struct net_device *dev, int new_mtu,
8384 struct netlink_ext_ack *extack)
8385{
8386 /* MTU must be positive, and in range */
8387 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8388 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8389 return -EINVAL;
8390 }
8391
8392 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8393 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8394 return -EINVAL;
8395 }
8396 return 0;
8397}
8398
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008399/**
Stephen Hemminger7a4c53b2018-07-27 13:43:23 -07008400 * dev_set_mtu_ext - Change maximum transfer unit
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008401 * @dev: device
8402 * @new_mtu: new transfer unit
Stephen Hemminger7a4c53b2018-07-27 13:43:23 -07008403 * @extack: netlink extended ack
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008404 *
8405 * Change the maximum transfer size of the network device.
8406 */
Stephen Hemminger7a4c53b2018-07-27 13:43:23 -07008407int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8408 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008409{
Veaceslav Falico2315dc92014-01-10 16:56:25 +01008410 int err, orig_mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008411
8412 if (new_mtu == dev->mtu)
8413 return 0;
8414
Eric Dumazetd836f5c2020-01-21 22:47:29 -08008415 err = dev_validate_mtu(dev, new_mtu, extack);
8416 if (err)
8417 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008418
8419 if (!netif_device_present(dev))
8420 return -ENODEV;
8421
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01008422 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8423 err = notifier_to_errno(err);
8424 if (err)
8425 return err;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08008426
Veaceslav Falico2315dc92014-01-10 16:56:25 +01008427 orig_mtu = dev->mtu;
8428 err = __dev_set_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008429
Veaceslav Falico2315dc92014-01-10 16:56:25 +01008430 if (!err) {
Sabrina Dubrocaaf7d6cc2018-10-09 17:48:14 +02008431 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8432 orig_mtu);
Veaceslav Falico2315dc92014-01-10 16:56:25 +01008433 err = notifier_to_errno(err);
8434 if (err) {
8435 /* setting mtu back and notifying everyone again,
8436 * so that they have a chance to revert changes.
8437 */
8438 __dev_set_mtu(dev, orig_mtu);
Sabrina Dubrocaaf7d6cc2018-10-09 17:48:14 +02008439 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8440 new_mtu);
Veaceslav Falico2315dc92014-01-10 16:56:25 +01008441 }
8442 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008443 return err;
8444}
Stephen Hemminger7a4c53b2018-07-27 13:43:23 -07008445
8446int dev_set_mtu(struct net_device *dev, int new_mtu)
8447{
8448 struct netlink_ext_ack extack;
8449 int err;
8450
Li RongQinga6bcfc82018-08-03 15:45:21 +08008451 memset(&extack, 0, sizeof(extack));
Stephen Hemminger7a4c53b2018-07-27 13:43:23 -07008452 err = dev_set_mtu_ext(dev, new_mtu, &extack);
Li RongQinga6bcfc82018-08-03 15:45:21 +08008453 if (err && extack._msg)
Stephen Hemminger7a4c53b2018-07-27 13:43:23 -07008454 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8455 return err;
8456}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008457EXPORT_SYMBOL(dev_set_mtu);
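
/*
 * Illustrative sketch, not part of the original file: clamping a
 * requested MTU to the device's advertised range before applying it.
 * foo_apply_mtu() is hypothetical; the caller holds RTNL.
 */
static int foo_apply_mtu(struct net_device *dev, int mtu)
{
        ASSERT_RTNL();

        if (dev->min_mtu && mtu < dev->min_mtu)
                mtu = dev->min_mtu;
        if (dev->max_mtu && mtu > dev->max_mtu)
                mtu = dev->max_mtu;

        return dev_set_mtu(dev, mtu);
}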
Linus Torvalds1da177e2005-04-16 15:20:36 -07008458
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008459/**
Cong Wang6a643dd2018-01-25 18:26:22 -08008460 * dev_change_tx_queue_len - Change TX queue length of a netdevice
8461 * @dev: device
8462 * @new_len: new tx queue length
8463 */
8464int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8465{
8466 unsigned int orig_len = dev->tx_queue_len;
8467 int res;
8468
8469 if (new_len != (unsigned int)new_len)
8470 return -ERANGE;
8471
8472 if (new_len != orig_len) {
8473 dev->tx_queue_len = new_len;
8474 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8475 res = notifier_to_errno(res);
Tariq Toukan7effaf02018-07-24 14:12:20 +03008476 if (res)
8477 goto err_rollback;
8478 res = dev_qdisc_change_tx_queue_len(dev);
8479 if (res)
8480 goto err_rollback;
Cong Wang6a643dd2018-01-25 18:26:22 -08008481 }
8482
8483 return 0;
Tariq Toukan7effaf02018-07-24 14:12:20 +03008484
8485err_rollback:
8486 netdev_err(dev, "refused to change device tx_queue_len\n");
8487 dev->tx_queue_len = orig_len;
8488 return res;
Cong Wang6a643dd2018-01-25 18:26:22 -08008489}
8490
8491/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00008492 * dev_set_group - Change group this device belongs to
8493 * @dev: device
8494 * @new_group: group this device should belong to
8495 */
8496void dev_set_group(struct net_device *dev, int new_group)
8497{
8498 dev->group = new_group;
8499}
8500EXPORT_SYMBOL(dev_set_group);
8501
8502/**
Petr Machatad59cdf92018-12-13 11:54:35 +00008503 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8504 * @dev: device
8505 * @addr: new address
8506 * @extack: netlink extended ack
8507 */
8508int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8509 struct netlink_ext_ack *extack)
8510{
8511 struct netdev_notifier_pre_changeaddr_info info = {
8512 .info.dev = dev,
8513 .info.extack = extack,
8514 .dev_addr = addr,
8515 };
8516 int rc;
8517
8518 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8519 return notifier_to_errno(rc);
8520}
8521EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8522
8523/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008524 * dev_set_mac_address - Change Media Access Control Address
8525 * @dev: device
8526 * @sa: new address
Petr Machata3a37a962018-12-13 11:54:30 +00008527 * @extack: netlink extended ack
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07008528 *
8529 * Change the hardware (MAC) address of the device
8530 */
Petr Machata3a37a962018-12-13 11:54:30 +00008531int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8532 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008533{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08008534 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008535 int err;
8536
Stephen Hemmingerd3147742008-11-19 21:32:24 -08008537 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008538 return -EOPNOTSUPP;
8539 if (sa->sa_family != dev->type)
8540 return -EINVAL;
8541 if (!netif_device_present(dev))
8542 return -ENODEV;
Petr Machatad59cdf92018-12-13 11:54:35 +00008543 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8544 if (err)
8545 return err;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08008546 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00008547 if (err)
8548 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00008549 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00008550 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04008551 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00008552 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008553}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07008554EXPORT_SYMBOL(dev_set_mac_address);
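
/*
 * Illustrative sketch, not part of the original file: setting a new
 * hardware address from kernel code. The sockaddr family must match
 * dev->type, as checked above. foo_set_mac() is hypothetical.
 */
static int foo_set_mac(struct net_device *dev, const u8 *mac,
                       struct netlink_ext_ack *extack)
{
        struct sockaddr sa;

        ASSERT_RTNL();

        sa.sa_family = dev->type;               /* e.g. ARPHRD_ETHER */
        memcpy(sa.sa_data, mac, dev->addr_len);

        return dev_set_mac_address(dev, &sa, extack);
}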
Linus Torvalds1da177e2005-04-16 15:20:36 -07008555
Jiri Pirko4bf84c32012-12-27 23:49:37 +00008556/**
8557 * dev_change_carrier - Change device carrier
8558 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00008559 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00008560 *
8561 * Change device carrier
8562 */
8563int dev_change_carrier(struct net_device *dev, bool new_carrier)
8564{
8565 const struct net_device_ops *ops = dev->netdev_ops;
8566
8567 if (!ops->ndo_change_carrier)
8568 return -EOPNOTSUPP;
8569 if (!netif_device_present(dev))
8570 return -ENODEV;
8571 return ops->ndo_change_carrier(dev, new_carrier);
8572}
8573EXPORT_SYMBOL(dev_change_carrier);
8574
Linus Torvalds1da177e2005-04-16 15:20:36 -07008575/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02008576 * dev_get_phys_port_id - Get device physical port ID
8577 * @dev: device
8578 * @ppid: port ID
8579 *
8580 * Get device physical port ID
8581 */
8582int dev_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01008583 struct netdev_phys_item_id *ppid)
Jiri Pirko66b52b02013-07-29 18:16:49 +02008584{
8585 const struct net_device_ops *ops = dev->netdev_ops;
8586
8587 if (!ops->ndo_get_phys_port_id)
8588 return -EOPNOTSUPP;
8589 return ops->ndo_get_phys_port_id(dev, ppid);
8590}
8591EXPORT_SYMBOL(dev_get_phys_port_id);
8592
8593/**
David Aherndb24a902015-03-17 20:23:15 -06008594 * dev_get_phys_port_name - Get device physical port name
8595 * @dev: device
8596 * @name: port name
Luis de Bethencourted49e652016-03-21 16:31:14 +00008597 * @len: limit of bytes to copy to name
David Aherndb24a902015-03-17 20:23:15 -06008598 *
8599 * Get device physical port name
8600 */
8601int dev_get_phys_port_name(struct net_device *dev,
8602 char *name, size_t len)
8603{
8604 const struct net_device_ops *ops = dev->netdev_ops;
Jiri Pirkoaf3836d2019-03-28 13:56:37 +01008605 int err;
David Aherndb24a902015-03-17 20:23:15 -06008606
Jiri Pirkoaf3836d2019-03-28 13:56:37 +01008607 if (ops->ndo_get_phys_port_name) {
8608 err = ops->ndo_get_phys_port_name(dev, name, len);
8609 if (err != -EOPNOTSUPP)
8610 return err;
8611 }
8612 return devlink_compat_phys_port_name_get(dev, name, len);
David Aherndb24a902015-03-17 20:23:15 -06008613}
8614EXPORT_SYMBOL(dev_get_phys_port_name);
8615
8616/**
Florian Fainellid6abc5962019-02-06 09:45:35 -08008617 * dev_get_port_parent_id - Get the device's port parent identifier
8618 * @dev: network device
8619 * @ppid: pointer to a storage for the port's parent identifier
8620 * @recurse: allow/disallow recursion to lower devices
8621 *
8622 * Get the device's port parent identifier
8623 */
8624int dev_get_port_parent_id(struct net_device *dev,
8625 struct netdev_phys_item_id *ppid,
8626 bool recurse)
8627{
8628 const struct net_device_ops *ops = dev->netdev_ops;
8629 struct netdev_phys_item_id first = { };
8630 struct net_device *lower_dev;
8631 struct list_head *iter;
Jiri Pirko7e1146e2019-04-03 14:24:17 +02008632 int err;
Florian Fainellid6abc5962019-02-06 09:45:35 -08008633
Jiri Pirko7e1146e2019-04-03 14:24:17 +02008634 if (ops->ndo_get_port_parent_id) {
8635 err = ops->ndo_get_port_parent_id(dev, ppid);
8636 if (err != -EOPNOTSUPP)
8637 return err;
8638 }
8639
8640 err = devlink_compat_switch_id_get(dev, ppid);
8641 if (!err || err != -EOPNOTSUPP)
8642 return err;
Florian Fainellid6abc5962019-02-06 09:45:35 -08008643
8644 if (!recurse)
Jiri Pirko7e1146e2019-04-03 14:24:17 +02008645 return -EOPNOTSUPP;
Florian Fainellid6abc5962019-02-06 09:45:35 -08008646
8647 netdev_for_each_lower_dev(dev, lower_dev, iter) {
8648 err = dev_get_port_parent_id(lower_dev, ppid, recurse);
8649 if (err)
8650 break;
8651 if (!first.id_len)
8652 first = *ppid;
8653 else if (memcmp(&first, ppid, sizeof(*ppid)))
8654 return -ENODATA;
8655 }
8656
8657 return err;
8658}
8659EXPORT_SYMBOL(dev_get_port_parent_id);
8660
8661/**
8662 * netdev_port_same_parent_id - Indicate if two network devices have
8663 * the same port parent identifier
8664 * @a: first network device
8665 * @b: second network device
8666 */
8667bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
8668{
8669 struct netdev_phys_item_id a_id = { };
8670 struct netdev_phys_item_id b_id = { };
8671
8672 if (dev_get_port_parent_id(a, &a_id, true) ||
8673 dev_get_port_parent_id(b, &b_id, true))
8674 return false;
8675
8676 return netdev_phys_item_id_same(&a_id, &b_id);
8677}
8678EXPORT_SYMBOL(netdev_port_same_parent_id);
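
/*
 * Illustrative sketch, not part of the original file: switchdev-style
 * callers use the two helpers above to discover a port's parent switch
 * and to test whether two ports share one before offloading between
 * them. foo_ports_share_switch() is hypothetical.
 */
static bool foo_ports_share_switch(struct net_device *a,
                                   struct net_device *b)
{
        struct netdev_phys_item_id ppid;

        if (!dev_get_port_parent_id(a, &ppid, true))
                netdev_dbg(a, "parent switch id %*phN\n", ppid.id_len, ppid.id);

        return netdev_port_same_parent_id(a, b);
}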
8679
8680/**
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07008681 * dev_change_proto_down - update protocol port state information
8682 * @dev: device
8683 * @proto_down: new value
8684 *
8685 * This info can be used by switch drivers to set the phys state of the
8686 * port.
8687 */
8688int dev_change_proto_down(struct net_device *dev, bool proto_down)
8689{
8690 const struct net_device_ops *ops = dev->netdev_ops;
8691
8692 if (!ops->ndo_change_proto_down)
8693 return -EOPNOTSUPP;
8694 if (!netif_device_present(dev))
8695 return -ENODEV;
8696 return ops->ndo_change_proto_down(dev, proto_down);
8697}
8698EXPORT_SYMBOL(dev_change_proto_down);
8699
Andy Roulinb5899672019-02-22 18:06:36 +00008700/**
8701 * dev_change_proto_down_generic - generic implementation for
8702 * ndo_change_proto_down that sets carrier according to
8703 * proto_down.
8704 *
8705 * @dev: device
8706 * @proto_down: new value
8707 */
8708int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
8709{
8710 if (proto_down)
8711 netif_carrier_off(dev);
8712 else
8713 netif_carrier_on(dev);
8714 dev->proto_down = proto_down;
8715 return 0;
8716}
8717EXPORT_SYMBOL(dev_change_proto_down_generic);
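
/*
 * Illustrative sketch, not part of the original file: a driver with no
 * special proto_down handling can point its ndo at the generic helper
 * above. foo_netdev_ops and the elided members are placeholders.
 */
static const struct net_device_ops foo_netdev_ops = {
        /* ... other ndo_* callbacks ... */
        .ndo_change_proto_down  = dev_change_proto_down_generic,
};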
8718
Jakub Kicinskia25717d2018-07-11 20:36:41 -07008719u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
8720 enum bpf_netdev_command cmd)
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008721{
Jakub Kicinskif4e63522017-11-03 13:56:16 -07008722 struct netdev_bpf xdp;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008723
Jakub Kicinskia25717d2018-07-11 20:36:41 -07008724 if (!bpf_op)
8725 return 0;
Martin KaFai Lau58038692017-06-15 17:29:09 -07008726
Jakub Kicinskia25717d2018-07-11 20:36:41 -07008727 memset(&xdp, 0, sizeof(xdp));
8728 xdp.command = cmd;
8729
8730 /* Query must always succeed. */
8731 WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008732
Jakub Kicinski6b867582018-07-11 20:36:39 -07008733 return xdp.prog_id;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008734}
8735
Jakub Kicinskif4e63522017-11-03 13:56:16 -07008736static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
Jakub Kicinski32d60272017-06-21 18:25:03 -07008737 struct netlink_ext_ack *extack, u32 flags,
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008738 struct bpf_prog *prog)
8739{
Björn Töpel7e6897f2019-12-13 18:51:09 +01008740 bool non_hw = !(flags & XDP_FLAGS_HW_MODE);
8741 struct bpf_prog *prev_prog = NULL;
Jakub Kicinskif4e63522017-11-03 13:56:16 -07008742 struct netdev_bpf xdp;
Björn Töpel7e6897f2019-12-13 18:51:09 +01008743 int err;
8744
8745 if (non_hw) {
8746 prev_prog = bpf_prog_by_id(__dev_xdp_query(dev, bpf_op,
8747 XDP_QUERY_PROG));
8748 if (IS_ERR(prev_prog))
8749 prev_prog = NULL;
8750 }
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008751
8752 memset(&xdp, 0, sizeof(xdp));
Jakub Kicinskiee5d0322017-06-21 18:25:04 -07008753 if (flags & XDP_FLAGS_HW_MODE)
8754 xdp.command = XDP_SETUP_PROG_HW;
8755 else
8756 xdp.command = XDP_SETUP_PROG;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008757 xdp.extack = extack;
Jakub Kicinski32d60272017-06-21 18:25:03 -07008758 xdp.flags = flags;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008759 xdp.prog = prog;
8760
Björn Töpel7e6897f2019-12-13 18:51:09 +01008761 err = bpf_op(dev, &xdp);
8762 if (!err && non_hw)
8763 bpf_prog_change_xdp(prev_prog, prog);
8764
8765 if (prev_prog)
8766 bpf_prog_put(prev_prog);
8767
8768 return err;
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008769}
8770
Jakub Kicinskibd0b2e72017-12-01 15:08:57 -08008771static void dev_xdp_uninstall(struct net_device *dev)
8772{
8773 struct netdev_bpf xdp;
8774 bpf_op_t ndo_bpf;
8775
8776 /* Remove generic XDP */
8777 WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
8778
8779 /* Remove from the driver */
8780 ndo_bpf = dev->netdev_ops->ndo_bpf;
8781 if (!ndo_bpf)
8782 return;
8783
Jakub Kicinskia25717d2018-07-11 20:36:41 -07008784 memset(&xdp, 0, sizeof(xdp));
8785 xdp.command = XDP_QUERY_PROG;
8786 WARN_ON(ndo_bpf(dev, &xdp));
8787 if (xdp.prog_id)
8788 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
8789 NULL));
Jakub Kicinskibd0b2e72017-12-01 15:08:57 -08008790
Jakub Kicinskia25717d2018-07-11 20:36:41 -07008791 /* Remove HW offload */
8792 memset(&xdp, 0, sizeof(xdp));
8793 xdp.command = XDP_QUERY_PROG_HW;
8794 if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
8795 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
8796 NULL));
Jakub Kicinskibd0b2e72017-12-01 15:08:57 -08008797}
8798
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07008799/**
Brenden Blancoa7862b42016-07-19 12:16:48 -07008800 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
8801 * @dev: device
Jakub Kicinskib5d60982017-05-01 15:53:43 -07008802 * @extack: netlink extended ack
Brenden Blancoa7862b42016-07-19 12:16:48 -07008803 * @fd: new program fd or negative value to clear
Toke Høiland-Jørgensen92234c82020-03-25 18:23:26 +01008804 * @expected_fd: old program fd that userspace expects to replace or clear
Daniel Borkmann85de8572016-11-28 23:16:54 +01008805 * @flags: xdp-related flags
Brenden Blancoa7862b42016-07-19 12:16:48 -07008806 *
8807 * Set or clear a bpf program for a device
8808 */
Jakub Kicinskiddf9f972017-04-30 21:46:46 -07008809int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
Toke Høiland-Jørgensen92234c82020-03-25 18:23:26 +01008810 int fd, int expected_fd, u32 flags)
Brenden Blancoa7862b42016-07-19 12:16:48 -07008811{
8812 const struct net_device_ops *ops = dev->netdev_ops;
Jakub Kicinskia25717d2018-07-11 20:36:41 -07008813 enum bpf_netdev_command query;
Toke Høiland-Jørgensen92234c82020-03-25 18:23:26 +01008814 u32 prog_id, expected_id = 0;
Jakub Kicinskif4e63522017-11-03 13:56:16 -07008815 bpf_op_t bpf_op, bpf_chk;
David Aherndfa74902020-04-12 07:32:04 -06008816 struct bpf_prog *prog;
Jakub Kicinski9ee963d2019-02-05 20:03:21 -08008817 bool offload;
Brenden Blancoa7862b42016-07-19 12:16:48 -07008818 int err;
8819
Daniel Borkmann85de8572016-11-28 23:16:54 +01008820 ASSERT_RTNL();
8821
Jakub Kicinski9ee963d2019-02-05 20:03:21 -08008822 offload = flags & XDP_FLAGS_HW_MODE;
8823 query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
Jakub Kicinskia25717d2018-07-11 20:36:41 -07008824
Jakub Kicinskif4e63522017-11-03 13:56:16 -07008825 bpf_op = bpf_chk = ops->ndo_bpf;
Maciej Fijalkowski01dde202019-02-01 22:42:27 +01008826 if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) {
8827 NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode");
Daniel Borkmann0489df92017-05-12 01:04:45 +02008828 return -EOPNOTSUPP;
Maciej Fijalkowski01dde202019-02-01 22:42:27 +01008829 }
Jakub Kicinskif4e63522017-11-03 13:56:16 -07008830 if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
8831 bpf_op = generic_xdp_install;
8832 if (bpf_op == bpf_chk)
8833 bpf_chk = generic_xdp_install;
David S. Millerb5cdae32017-04-18 15:36:58 -04008834
Toke Høiland-Jørgensen92234c82020-03-25 18:23:26 +01008835 prog_id = __dev_xdp_query(dev, bpf_op, query);
8836 if (flags & XDP_FLAGS_REPLACE) {
8837 if (expected_fd >= 0) {
8838 prog = bpf_prog_get_type_dev(expected_fd,
8839 BPF_PROG_TYPE_XDP,
8840 bpf_op == ops->ndo_bpf);
8841 if (IS_ERR(prog))
8842 return PTR_ERR(prog);
8843 expected_id = prog->aux->id;
8844 bpf_prog_put(prog);
8845 }
Maxim Mikityanskiyc14a9f62019-08-14 14:34:06 +00008846
Toke Høiland-Jørgensen92234c82020-03-25 18:23:26 +01008847 if (prog_id != expected_id) {
8848 NL_SET_ERR_MSG(extack, "Active program does not match expected");
8849 return -EEXIST;
8850 }
8851 }
8852 if (fd >= 0) {
Jakub Kicinski9ee963d2019-02-05 20:03:21 -08008853 if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
Maciej Fijalkowski01dde202019-02-01 22:42:27 +01008854 NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008855 return -EEXIST;
Maciej Fijalkowski01dde202019-02-01 22:42:27 +01008856 }
Maxim Mikityanskiyc14a9f62019-08-14 14:34:06 +00008857
Maxim Mikityanskiyc14a9f62019-08-14 14:34:06 +00008858 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) {
Maciej Fijalkowski01dde202019-02-01 22:42:27 +01008859 NL_SET_ERR_MSG(extack, "XDP program already attached");
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02008860 return -EBUSY;
Maciej Fijalkowski01dde202019-02-01 22:42:27 +01008861 }
Daniel Borkmann85de8572016-11-28 23:16:54 +01008862
Jakub Kicinski288b3de2017-11-20 15:21:54 -08008863 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
8864 bpf_op == ops->ndo_bpf);
Brenden Blancoa7862b42016-07-19 12:16:48 -07008865 if (IS_ERR(prog))
8866 return PTR_ERR(prog);
Jakub Kicinski441a3302017-11-20 15:21:55 -08008867
Jakub Kicinski9ee963d2019-02-05 20:03:21 -08008868 if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
Jakub Kicinski441a3302017-11-20 15:21:55 -08008869 NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
8870 bpf_prog_put(prog);
8871 return -EINVAL;
8872 }
Maxim Mikityanskiyc14a9f62019-08-14 14:34:06 +00008873
David Ahernfbee97f2020-05-29 16:07:13 -06008874 if (prog->expected_attach_type == BPF_XDP_DEVMAP) {
8875 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
8876 bpf_prog_put(prog);
8877 return -EINVAL;
8878 }
8879
Lorenzo Bianconi92164772020-07-14 15:56:38 +02008880 if (prog->expected_attach_type == BPF_XDP_CPUMAP) {
8881 NL_SET_ERR_MSG(extack,
8882 "BPF_XDP_CPUMAP programs can not be attached to a device");
8883 bpf_prog_put(prog);
8884 return -EINVAL;
8885 }
8886
Jakub Kicinskiaefc3e72019-10-31 20:07:00 -07008887 /* prog->aux->id may be 0 for orphaned device-bound progs */
8888 if (prog->aux->id && prog->aux->id == prog_id) {
Maxim Mikityanskiyc14a9f62019-08-14 14:34:06 +00008889 bpf_prog_put(prog);
8890 return 0;
8891 }
8892 } else {
Toke Høiland-Jørgensen92234c82020-03-25 18:23:26 +01008893 if (!prog_id)
Maxim Mikityanskiyc14a9f62019-08-14 14:34:06 +00008894 return 0;
David Aherndfa74902020-04-12 07:32:04 -06008895 prog = NULL;
Brenden Blancoa7862b42016-07-19 12:16:48 -07008896 }
8897
Jakub Kicinskif4e63522017-11-03 13:56:16 -07008898 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
Brenden Blancoa7862b42016-07-19 12:16:48 -07008899 if (err < 0 && prog)
8900 bpf_prog_put(prog);
8901
8902 return err;
8903}
Brenden Blancoa7862b42016-07-19 12:16:48 -07008904
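/*
 * Usage sketch (hypothetical caller, not part of this file). "prog_fd" is
 * assumed to be a valid BPF_PROG_TYPE_XDP program fd handed in from
 * userspace; like the rtnetlink and bpf syscall paths, the caller must hold
 * RTNL (the ASSERT_RTNL() above will trip otherwise).
 *
 *	Attach in native (driver) mode, failing if a program is already there:
 *
 *		err = dev_change_xdp_fd(dev, extack, prog_fd, -1,
 *					XDP_FLAGS_DRV_MODE |
 *					XDP_FLAGS_UPDATE_IF_NOEXIST);
 *
 *	Clear whatever driver-mode program is installed:
 *
 *		err = dev_change_xdp_fd(dev, extack, -1, -1, XDP_FLAGS_DRV_MODE);
 */
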
8905/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008906 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07008907 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07008908 *
8909 * Returns a suitable unique value for a new device interface
8910 * number. The caller must hold the rtnl semaphore or the
8911 * dev_base_lock to be sure it remains unique.
8912 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07008913static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008914{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00008915 int ifindex = net->ifindex;
tchardingf4563a72017-02-09 17:56:07 +11008916
Linus Torvalds1da177e2005-04-16 15:20:36 -07008917 for (;;) {
8918 if (++ifindex <= 0)
8919 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07008920 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00008921 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008922 }
8923}
8924
Linus Torvalds1da177e2005-04-16 15:20:36 -07008925/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08008926static LIST_HEAD(net_todo_list);
Cong Wang200b9162014-05-12 15:11:20 -07008927DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008928
Stephen Hemminger6f05f622007-03-08 20:46:03 -08008929static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008930{
Linus Torvalds1da177e2005-04-16 15:20:36 -07008931 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07008932 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008933}
8934
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008935static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07008936{
Krishna Kumare93737b2009-12-08 22:26:02 +00008937 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07008938 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008939
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07008940 BUG_ON(dev_boot_phase);
8941 ASSERT_RTNL();
8942
Krishna Kumare93737b2009-12-08 22:26:02 +00008943 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008944		/* Some devices get unregistered without ever having been
Krishna Kumare93737b2009-12-08 22:26:02 +00008945		 * fully registered, as part of their initialization unwind.
8946		 * Remove those devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008947		 */
8948 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00008949 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
8950 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07008951
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008952 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00008953 list_del(&dev->unreg_list);
8954 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008955 }
Eric Dumazet449f4542011-05-19 12:24:16 +00008956 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008957 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00008958 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008959
Octavian Purdila44345722010-12-13 12:44:07 +00008960 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07008961 list_for_each_entry(dev, head, unreg_list)
8962 list_add_tail(&dev->close_list, &close_head);
David S. Miller99c4a262015-03-18 22:52:33 -04008963 dev_close_many(&close_head, true);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008964
Octavian Purdila44345722010-12-13 12:44:07 +00008965 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008966 /* And unlink it from device chain. */
8967 unlist_netdevice(dev);
8968
8969 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07008970 }
Eric Dumazet41852492016-08-26 12:50:39 -07008971 flush_all_backlogs();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07008972
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008973 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07008974
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008975 list_for_each_entry(dev, head, unreg_list) {
Mahesh Bandewar395eea62014-12-03 13:46:24 -08008976 struct sk_buff *skb = NULL;
8977
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008978 /* Shutdown queueing discipline. */
8979 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07008980
Jakub Kicinskibd0b2e72017-12-01 15:08:57 -08008981 dev_xdp_uninstall(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07008982
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008983		/* Notify protocols that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +11008984		 * this device. They should clean up all of their state.
8985 */
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008986 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
8987
Mahesh Bandewar395eea62014-12-03 13:46:24 -08008988 if (!dev->rtnl_link_ops ||
8989 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Vlad Yasevich3d3ea5a2017-05-27 10:14:34 -04008990 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
Nicolas Dichtel38e01b32018-01-25 15:01:39 +01008991 GFP_KERNEL, NULL, 0);
Mahesh Bandewar395eea62014-12-03 13:46:24 -08008992
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008993 /*
8994 * Flush the unicast and multicast chains
8995 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00008996 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00008997 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00008998
Jiri Pirko36fbf1e2019-09-30 11:48:16 +02008999 netdev_name_node_alt_flush(dev);
Jiri Pirkoff927412019-09-30 11:48:15 +02009000 netdev_name_node_free(dev->name_node);
9001
Eric Dumazet9b5e3832009-10-27 07:04:19 +00009002 if (dev->netdev_ops->ndo_uninit)
9003 dev->netdev_ops->ndo_uninit(dev);
9004
Mahesh Bandewar395eea62014-12-03 13:46:24 -08009005 if (skb)
9006 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
Roopa Prabhu56bfa7e2014-05-01 11:40:30 -07009007
Jiri Pirko9ff162a2013-01-03 22:48:49 +00009008 /* Notifier chain MUST detach us all upper devices. */
9009 WARN_ON(netdev_has_any_upper_dev(dev));
David Ahern0f524a82016-10-17 19:15:52 -07009010 WARN_ON(netdev_has_any_lower_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00009011
9012 /* Remove entries from kobject tree */
9013 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00009014#ifdef CONFIG_XPS
9015 /* Remove XPS queueing entries */
9016 netif_reset_xps_queues_gt(dev, 0);
9017#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00009018 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07009019
Eric W. Biederman850a5452011-10-13 22:25:23 +00009020 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07009021
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00009022 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00009023 dev_put(dev);
9024}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07009025
Eric Dumazet9b5e3832009-10-27 07:04:19 +00009026static void rollback_registered(struct net_device *dev)
9027{
9028 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07009029
Eric Dumazet9b5e3832009-10-27 07:04:19 +00009030 list_add(&dev->unreg_list, &single);
9031 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00009032 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07009033}
9034
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009035static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9036 struct net_device *upper, netdev_features_t features)
9037{
9038 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9039 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05009040 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009041
Hauke Mehrtens3b89ea92019-02-15 17:58:54 +01009042 for_each_netdev_feature(upper_disables, feature_bit) {
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05009043 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009044 if (!(upper->wanted_features & feature)
9045 && (features & feature)) {
9046 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9047 &feature, upper->name);
9048 features &= ~feature;
9049 }
9050 }
9051
9052 return features;
9053}
9054
9055static void netdev_sync_lower_features(struct net_device *upper,
9056 struct net_device *lower, netdev_features_t features)
9057{
9058 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9059 netdev_features_t feature;
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05009060 int feature_bit;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009061
Hauke Mehrtens3b89ea92019-02-15 17:58:54 +01009062 for_each_netdev_feature(upper_disables, feature_bit) {
Jarod Wilson5ba3f7d2015-11-03 10:15:59 -05009063 feature = __NETIF_F_BIT(feature_bit);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009064 if (!(features & feature) && (lower->features & feature)) {
9065 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9066 &feature, lower->name);
9067 lower->wanted_features &= ~feature;
Cong Wangdd912302020-05-07 12:19:03 -07009068 __netdev_update_features(lower);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009069
9070 if (unlikely(lower->features & feature))
9071 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9072 &feature, lower->name);
Cong Wangdd912302020-05-07 12:19:03 -07009073 else
9074 netdev_features_change(lower);
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009075 }
9076 }
9077}
9078
Michał Mirosławc8f44af2011-11-15 15:29:55 +00009079static netdev_features_t netdev_fix_features(struct net_device *dev,
9080 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07009081{
Michał Mirosław57422dc2011-01-22 12:14:12 +00009082 /* Fix illegal checksum combinations */
9083 if ((features & NETIF_F_HW_CSUM) &&
9084 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04009085 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00009086 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9087 }
9088
Herbert Xub63365a2008-10-23 01:11:29 -07009089 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00009090 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04009091 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00009092 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07009093 }
9094
Pravin B Shelarec5f0612013-03-07 09:28:01 +00009095 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9096 !(features & NETIF_F_IP_CSUM)) {
9097 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9098 features &= ~NETIF_F_TSO;
9099 features &= ~NETIF_F_TSO_ECN;
9100 }
9101
9102 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9103 !(features & NETIF_F_IPV6_CSUM)) {
9104 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9105 features &= ~NETIF_F_TSO6;
9106 }
9107
Alexander Duyckb1dc4972016-05-02 09:38:24 -07009108 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9109 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9110 features &= ~NETIF_F_TSO_MANGLEID;
9111
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00009112 /* TSO ECN requires that TSO is present as well. */
9113 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9114 features &= ~NETIF_F_TSO_ECN;
9115
Michał Mirosław212b5732011-02-15 16:59:16 +00009116 /* Software GSO depends on SG. */
9117 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04009118 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00009119 features &= ~NETIF_F_GSO;
9120 }
9121
Alexander Duyck802ab552016-04-10 21:45:03 -04009122 /* GSO partial features require GSO partial be set */
9123 if ((features & dev->gso_partial_features) &&
9124 !(features & NETIF_F_GSO_PARTIAL)) {
9125 netdev_dbg(dev,
9126 "Dropping partially supported GSO features since no GSO partial.\n");
9127 features &= ~dev->gso_partial_features;
9128 }
9129
Michael Chanfb1f5f72017-12-16 03:09:40 -05009130 if (!(features & NETIF_F_RXCSUM)) {
9131 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9132 * successfully merged by hardware must also have the
9133 * checksum verified by hardware. If the user does not
9134 * want to enable RXCSUM, logically, we should disable GRO_HW.
9135 */
9136 if (features & NETIF_F_GRO_HW) {
9137 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9138 features &= ~NETIF_F_GRO_HW;
9139 }
9140 }
9141
Gal Pressmande8d5ab2018-03-12 11:48:49 +02009142 /* LRO/HW-GRO features cannot be combined with RX-FCS */
9143 if (features & NETIF_F_RXFCS) {
9144 if (features & NETIF_F_LRO) {
9145 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9146 features &= ~NETIF_F_LRO;
9147 }
9148
9149 if (features & NETIF_F_GRO_HW) {
9150 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9151 features &= ~NETIF_F_GRO_HW;
9152 }
Gal Pressmane6c6a922018-03-04 14:12:04 +02009153 }
9154
Herbert Xub63365a2008-10-23 01:11:29 -07009155 return features;
9156}
Herbert Xub63365a2008-10-23 01:11:29 -07009157
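/*
 * Illustration (hypothetical values, not kernel code): the corrections above
 * are applied to the wanted feature set before the driver ever sees it. For
 * example, asking for TSO while SG is absent drops the whole TSO group:
 *
 *	netdev_features_t wanted = NETIF_F_TSO | NETIF_F_IP_CSUM;
 *	netdev_features_t fixed;
 *
 *	fixed = netdev_fix_features(dev, wanted);
 *	// NETIF_F_SG is missing, so every NETIF_F_ALL_TSO bit is cleared
 *	// and only NETIF_F_IP_CSUM survives in "fixed".
 */
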
Michał Mirosław6cb6a272011-04-02 22:48:47 -07009158int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00009159{
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009160 struct net_device *upper, *lower;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00009161 netdev_features_t features;
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009162 struct list_head *iter;
Jarod Wilsone7868a82015-11-03 23:09:32 -05009163 int err = -1;
Michał Mirosław5455c692011-02-15 16:59:17 +00009164
Michał Mirosław87267482011-04-12 09:56:38 +00009165 ASSERT_RTNL();
9166
Michał Mirosław5455c692011-02-15 16:59:17 +00009167 features = netdev_get_wanted_features(dev);
9168
9169 if (dev->netdev_ops->ndo_fix_features)
9170 features = dev->netdev_ops->ndo_fix_features(dev, features);
9171
9172 /* driver might be less strict about feature dependencies */
9173 features = netdev_fix_features(dev, features);
9174
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009175	/* some features can't be enabled if they're off on an upper device */
9176 netdev_for_each_upper_dev_rcu(dev, upper, iter)
9177 features = netdev_sync_upper_features(dev, upper, features);
9178
Michał Mirosław5455c692011-02-15 16:59:17 +00009179 if (dev->features == features)
Jarod Wilsone7868a82015-11-03 23:09:32 -05009180 goto sync_lower;
Michał Mirosław5455c692011-02-15 16:59:17 +00009181
Michał Mirosławc8f44af2011-11-15 15:29:55 +00009182 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9183 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00009184
9185 if (dev->netdev_ops->ndo_set_features)
9186 err = dev->netdev_ops->ndo_set_features(dev, features);
Nikolay Aleksandrov5f8dc332015-11-13 14:54:01 +01009187 else
9188 err = 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00009189
Michał Mirosław6cb6a272011-04-02 22:48:47 -07009190 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00009191 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00009192 "set_features() failed (%d); wanted %pNF, left %pNF\n",
9193 err, &features, &dev->features);
Nikolay Aleksandrov17b85d22015-11-17 15:49:06 +01009194 /* return non-0 since some features might have changed and
9195 * it's better to fire a spurious notification than miss it
9196 */
9197 return -1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07009198 }
9199
Jarod Wilsone7868a82015-11-03 23:09:32 -05009200sync_lower:
Jarod Wilsonfd867d52015-11-02 21:55:59 -05009201 /* some features must be disabled on lower devices when disabled
9202 * on an upper device (think: bonding master or bridge)
9203 */
9204 netdev_for_each_lower_dev(dev, lower, iter)
9205 netdev_sync_lower_features(dev, lower, features);
9206
Sabrina Dubrocaae847f42017-07-21 12:49:31 +02009207 if (!err) {
9208 netdev_features_t diff = features ^ dev->features;
9209
9210 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9211 /* udp_tunnel_{get,drop}_rx_info both need
9212 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9213 * device, or they won't do anything.
9214 * Thus we need to update dev->features
9215 * *before* calling udp_tunnel_get_rx_info,
9216 * but *after* calling udp_tunnel_drop_rx_info.
9217 */
9218 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9219 dev->features = features;
9220 udp_tunnel_get_rx_info(dev);
9221 } else {
9222 udp_tunnel_drop_rx_info(dev);
9223 }
9224 }
9225
Gal Pressman9daae9b2018-03-28 17:46:54 +03009226 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9227 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9228 dev->features = features;
9229 err |= vlan_get_rx_ctag_filter_info(dev);
9230 } else {
9231 vlan_drop_rx_ctag_filter_info(dev);
9232 }
9233 }
9234
9235 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9236 if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9237 dev->features = features;
9238 err |= vlan_get_rx_stag_filter_info(dev);
9239 } else {
9240 vlan_drop_rx_stag_filter_info(dev);
9241 }
9242 }
9243
Michał Mirosław6cb6a272011-04-02 22:48:47 -07009244 dev->features = features;
Sabrina Dubrocaae847f42017-07-21 12:49:31 +02009245 }
Michał Mirosław6cb6a272011-04-02 22:48:47 -07009246
Jarod Wilsone7868a82015-11-03 23:09:32 -05009247 return err < 0 ? 0 : 1;
Michał Mirosław6cb6a272011-04-02 22:48:47 -07009248}
9249
Michał Mirosławafe12cc2011-05-07 03:22:17 +00009250/**
9251 * netdev_update_features - recalculate device features
9252 * @dev: the device to check
9253 *
9254 * Recalculate dev->features set and send notifications if it
9255 * has changed. Should be called after driver or hardware dependent
9256 * conditions might have changed that influence the features.
9257 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07009258void netdev_update_features(struct net_device *dev)
9259{
9260 if (__netdev_update_features(dev))
9261 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00009262}
9263EXPORT_SYMBOL(netdev_update_features);
9264
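/*
 * Example (hypothetical driver code, names made up): re-evaluating offloads
 * after a condition that affects them changes. This must run under RTNL,
 * like the other feature paths above.
 *
 *	static void my_refresh_offloads(struct net_device *dev, bool fw_has_tso)
 *	{
 *		if (fw_has_tso)
 *			dev->hw_features |= NETIF_F_TSO;
 *		else
 *			dev->hw_features &= ~NETIF_F_TSO;
 *
 *		netdev_update_features(dev);	// notifies only if dev->features changed
 *	}
 */
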
Linus Torvalds1da177e2005-04-16 15:20:36 -07009265/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00009266 * netdev_change_features - recalculate device features
9267 * @dev: the device to check
9268 *
9269 * Recalculate dev->features set and send notifications even
9270 * if they have not changed. Should be called instead of
9271 * netdev_update_features() if also dev->vlan_features might
9272 * have changed to allow the changes to be propagated to stacked
9273 * VLAN devices.
9274 */
9275void netdev_change_features(struct net_device *dev)
9276{
9277 __netdev_update_features(dev);
9278 netdev_features_change(dev);
9279}
9280EXPORT_SYMBOL(netdev_change_features);
9281
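/*
 * Brief sketch (hypothetical): a driver that also touches vlan_features uses
 * this variant, so stacked VLAN devices recompute their features even when
 * dev->features itself ends up unchanged:
 *
 *	dev->vlan_features &= ~NETIF_F_TSO;
 *	netdev_change_features(dev);
 */
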
9282/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08009283 * netif_stacked_transfer_operstate - transfer operstate
9284 * @rootdev: the root or lower level device to transfer state from
9285 * @dev: the device to transfer operstate to
9286 *
9287 * Transfer operational state from root to device. This is normally
9288 * called when a stacking relationship exists between the root
9289 * device and the device(a leaf device).
9290 */
9291void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9292 struct net_device *dev)
9293{
9294 if (rootdev->operstate == IF_OPER_DORMANT)
9295 netif_dormant_on(dev);
9296 else
9297 netif_dormant_off(dev);
9298
Andrew Lunneec517cd2020-04-20 00:11:50 +02009299 if (rootdev->operstate == IF_OPER_TESTING)
9300 netif_testing_on(dev);
9301 else
9302 netif_testing_off(dev);
9303
Zhang Shengju0575c862017-04-26 17:49:38 +08009304 if (netif_carrier_ok(rootdev))
9305 netif_carrier_on(dev);
9306 else
9307 netif_carrier_off(dev);
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08009308}
9309EXPORT_SYMBOL(netif_stacked_transfer_operstate);
9310
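/*
 * Example (hypothetical stacking driver, "my_upper_dev" is made up): callers
 * such as VLAN or macvlan typically invoke this from their netdevice
 * notifier when the lower device's state changes, e.g.:
 *
 *	case NETDEV_CHANGE:
 *		netif_stacked_transfer_operstate(lower_dev, my_upper_dev);
 *		break;
 */
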
Eric Dumazet1b4bf462010-09-23 17:26:35 +00009311static int netif_alloc_rx_queues(struct net_device *dev)
9312{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00009313 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00009314 struct netdev_rx_queue *rx;
Pankaj Gupta10595902015-01-12 11:41:28 +05309315 size_t sz = count * sizeof(*rx);
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01009316 int err = 0;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00009317
Tom Herbertbd25fa72010-10-18 18:00:16 +00009318 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00009319
Michal Hockodcda9b02017-07-12 14:36:45 -07009320 rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
Michal Hockoda6bc572017-05-08 15:57:31 -07009321 if (!rx)
9322 return -ENOMEM;
9323
Tom Herbertbd25fa72010-10-18 18:00:16 +00009324 dev->_rx = rx;
9325
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01009326 for (i = 0; i < count; i++) {
Tom Herbertfe822242010-11-09 10:47:38 +00009327 rx[i].dev = dev;
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01009328
9329 /* XDP RX-queue setup */
9330 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
9331 if (err < 0)
9332 goto err_rxq_info;
9333 }
Eric Dumazet1b4bf462010-09-23 17:26:35 +00009334 return 0;
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01009335
9336err_rxq_info:
9337 /* Rollback successful reg's and free other resources */
9338 while (i--)
9339 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
Jakub Kicinski141b52a2018-01-10 01:20:01 -08009340 kvfree(dev->_rx);
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01009341 dev->_rx = NULL;
9342 return err;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00009343}
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01009344
9345static void netif_free_rx_queues(struct net_device *dev)
9346{
9347 unsigned int i, count = dev->num_rx_queues;
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01009348
9349 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
9350 if (!dev->_rx)
9351 return;
9352
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01009353 for (i = 0; i < count; i++)
Jakub Kicinski82aaff22018-01-10 01:20:02 -08009354 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
9355
9356 kvfree(dev->_rx);
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +01009357}
Eric Dumazet1b4bf462010-09-23 17:26:35 +00009358
Changli Gaoaa942102010-12-04 02:31:41 +00009359static void netdev_init_one_queue(struct net_device *dev,
9360 struct netdev_queue *queue, void *_unused)
9361{
9362 /* Initialize queue lock */
9363 spin_lock_init(&queue->_xmit_lock);
Cong Wang1a33e102020-05-02 22:22:19 -07009364 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
Changli Gaoaa942102010-12-04 02:31:41 +00009365 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00009366 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00009367 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00009368#ifdef CONFIG_BQL
9369 dql_init(&queue->dql, HZ);
9370#endif
Changli Gaoaa942102010-12-04 02:31:41 +00009371}
9372
Eric Dumazet60877a32013-06-20 01:15:51 -07009373static void netif_free_tx_queues(struct net_device *dev)
9374{
WANG Cong4cb28972014-06-02 15:55:22 -07009375 kvfree(dev->_tx);
Eric Dumazet60877a32013-06-20 01:15:51 -07009376}
9377
Tom Herberte6484932010-10-18 18:04:39 +00009378static int netif_alloc_netdev_queues(struct net_device *dev)
9379{
9380 unsigned int count = dev->num_tx_queues;
9381 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07009382 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00009383
Eric Dumazetd3397272015-07-06 17:13:26 +02009384 if (count < 1 || count > 0xffff)
9385 return -EINVAL;
Tom Herberte6484932010-10-18 18:04:39 +00009386
Michal Hockodcda9b02017-07-12 14:36:45 -07009387 tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
Michal Hockoda6bc572017-05-08 15:57:31 -07009388 if (!tx)
9389 return -ENOMEM;
9390
Tom Herberte6484932010-10-18 18:04:39 +00009391 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00009392
Tom Herberte6484932010-10-18 18:04:39 +00009393 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
9394 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00009395
9396 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00009397}
9398
Denys Vlasenkoa2029242015-05-11 21:17:53 +02009399void netif_tx_stop_all_queues(struct net_device *dev)
9400{
9401 unsigned int i;
9402
9403 for (i = 0; i < dev->num_tx_queues; i++) {
9404 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
tchardingf4563a72017-02-09 17:56:07 +11009405
Denys Vlasenkoa2029242015-05-11 21:17:53 +02009406 netif_tx_stop_queue(txq);
9407 }
9408}
9409EXPORT_SYMBOL(netif_tx_stop_all_queues);
9410
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08009411/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009412 * register_netdevice - register a network device
9413 * @dev: device to register
9414 *
9415 * Take a completed network device structure and add it to the kernel
9416 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
9417 * chain. 0 is returned on success. A negative errno code is returned
9418 * on a failure to set up the device, or if the name is a duplicate.
9419 *
9420 * Callers must hold the rtnl semaphore. You may want
9421 * register_netdev() instead of this.
9422 *
9423 * BUGS:
9424 * The locking appears insufficient to guarantee two parallel registers
9425 * will not get the same name.
9426 */
9427
9428int register_netdevice(struct net_device *dev)
9429{
Linus Torvalds1da177e2005-04-16 15:20:36 -07009430 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08009431 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009432
Florian Fainellie283de32018-04-30 14:20:05 -07009433 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
9434 NETDEV_FEATURE_COUNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009435 BUG_ON(dev_boot_phase);
9436 ASSERT_RTNL();
9437
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07009438 might_sleep();
9439
Linus Torvalds1da177e2005-04-16 15:20:36 -07009440 /* When net_device's are persistent, this will be fatal. */
9441 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08009442 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009443
Jakub Kicinski9000edb2020-03-16 13:47:12 -07009444 ret = ethtool_check_ops(dev->ethtool_ops);
9445 if (ret)
9446 return ret;
9447
David S. Millerf1f28aa2008-07-15 00:08:33 -07009448 spin_lock_init(&dev->addr_list_lock);
Cong Wang845e0eb2020-06-08 14:53:01 -07009449 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009450
Gao feng828de4f2012-09-13 20:58:27 +00009451 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00009452 if (ret < 0)
9453 goto out;
9454
Eric Dumazet9077f052019-10-03 08:59:24 -07009455 ret = -ENOMEM;
Jiri Pirkoff927412019-09-30 11:48:15 +02009456 dev->name_node = netdev_name_node_head_alloc(dev);
9457 if (!dev->name_node)
9458 goto out;
9459
Linus Torvalds1da177e2005-04-16 15:20:36 -07009460 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08009461 if (dev->netdev_ops->ndo_init) {
9462 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009463 if (ret) {
9464 if (ret > 0)
9465 ret = -EIO;
Dan Carpenter42c17fa2019-12-03 17:12:39 +03009466 goto err_free_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009467 }
9468 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09009469
Patrick McHardyf6469682013-04-19 02:04:27 +00009470 if (((dev->hw_features | dev->features) &
9471 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00009472 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
9473 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
9474 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
9475 ret = -EINVAL;
9476 goto err_uninit;
9477 }
9478
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00009479 ret = -EBUSY;
9480 if (!dev->ifindex)
9481 dev->ifindex = dev_new_index(net);
9482 else if (__dev_get_by_index(net, dev->ifindex))
9483 goto err_uninit;
9484
Michał Mirosław5455c692011-02-15 16:59:17 +00009485 /* Transfer changeable features to wanted_features and enable
9486 * software offloads (GSO and GRO).
9487 */
Steffen Klassert1a3c9982020-01-25 11:26:43 +01009488 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
Michał Mirosław14d12322011-02-22 16:52:28 +00009489 dev->features |= NETIF_F_SOFT_FEATURES;
Sabrina Dubrocad764a122017-07-21 12:49:28 +02009490
9491 if (dev->netdev_ops->ndo_udp_tunnel_add) {
9492 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
9493 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
9494 }
9495
Michał Mirosław14d12322011-02-22 16:52:28 +00009496 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009497
Alexander Duyckcbc53e02016-04-10 21:44:51 -04009498 if (!(dev->flags & IFF_LOOPBACK))
Michał Mirosław34324dc2011-11-15 15:29:55 +00009499 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Alexander Duyckcbc53e02016-04-10 21:44:51 -04009500
Alexander Duyck7f348a62016-04-20 16:51:00 -04009501 /* If IPv4 TCP segmentation offload is supported we should also
9502 * allow the device to enable segmenting the frame with the option
9503 * of ignoring a static IP ID value. This doesn't enable the
9504 * feature itself but allows the user to enable it later.
9505 */
Alexander Duyckcbc53e02016-04-10 21:44:51 -04009506 if (dev->hw_features & NETIF_F_TSO)
9507 dev->hw_features |= NETIF_F_TSO_MANGLEID;
Alexander Duyck7f348a62016-04-20 16:51:00 -04009508 if (dev->vlan_features & NETIF_F_TSO)
9509 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
9510 if (dev->mpls_features & NETIF_F_TSO)
9511 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
9512 if (dev->hw_enc_features & NETIF_F_TSO)
9513 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07009514
Michał Mirosław1180e7d2011-07-14 14:41:11 -07009515 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00009516 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07009517 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00009518
Pravin B Shelaree579672013-03-07 09:28:08 +00009519 /* Make NETIF_F_SG inheritable to tunnel devices.
9520 */
Alexander Duyck802ab552016-04-10 21:45:03 -04009521 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
Pravin B Shelaree579672013-03-07 09:28:08 +00009522
Simon Horman0d89d202013-05-23 21:02:52 +00009523 /* Make NETIF_F_SG inheritable to MPLS.
9524 */
9525 dev->mpls_features |= NETIF_F_SG;
9526
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00009527 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
9528 ret = notifier_to_errno(ret);
9529 if (ret)
9530 goto err_uninit;
9531
Eric W. Biederman8b41d182007-09-26 22:02:53 -07009532 ret = netdev_register_kobject(dev);
Jouni Hogandercb626bf2020-01-20 09:51:03 +02009533 if (ret) {
9534 dev->reg_state = NETREG_UNREGISTERED;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07009535 goto err_uninit;
Jouni Hogandercb626bf2020-01-20 09:51:03 +02009536 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07009537 dev->reg_state = NETREG_REGISTERED;
9538
Michał Mirosław6cb6a272011-04-02 22:48:47 -07009539 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00009540
Linus Torvalds1da177e2005-04-16 15:20:36 -07009541 /*
9542	 * Default initial state at registration is that the
9543 * device is present.
9544 */
9545
9546 set_bit(__LINK_STATE_PRESENT, &dev->state);
9547
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01009548 linkwatch_init_dev(dev);
9549
Linus Torvalds1da177e2005-04-16 15:20:36 -07009550 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009551 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02009552 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04009553 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009554
Jiri Pirko948b3372013-01-08 01:38:25 +00009555 /* If the device has permanent device address, driver should
9556 * set dev_addr and also addr_assign_type should be set to
9557 * NET_ADDR_PERM (default value).
9558 */
9559 if (dev->addr_assign_type == NET_ADDR_PERM)
9560 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9561
Linus Torvalds1da177e2005-04-16 15:20:36 -07009562 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07009563 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07009564 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07009565 if (ret) {
9566 rollback_registered(dev);
Subash Abhinov Kasiviswanathan10cc5142019-09-10 14:02:57 -06009567 rcu_barrier();
9568
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07009569 dev->reg_state = NETREG_UNREGISTERED;
Yang Yingliang814152a2020-06-16 09:39:21 +00009570 /* We should put the kobject that hold in
9571 * netdev_unregister_kobject(), otherwise
9572 * the net device cannot be freed when
9573 * driver calls free_netdev(), because the
9574 * kobject is being hold.
9575 */
9576 kobject_put(&dev->dev.kobj);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07009577 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00009578 /*
9579 * Prevent userspace races by waiting until the network
9580 * device is fully setup before sending notifications.
9581 */
Patrick McHardya2835762010-02-26 06:34:51 +00009582 if (!dev->rtnl_link_ops ||
9583 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07009584 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009585
9586out:
9587 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07009588
9589err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08009590 if (dev->netdev_ops->ndo_uninit)
9591 dev->netdev_ops->ndo_uninit(dev);
David S. Millercf124db2017-05-08 12:52:56 -04009592 if (dev->priv_destructor)
9593 dev->priv_destructor(dev);
Dan Carpenter42c17fa2019-12-03 17:12:39 +03009594err_free_name:
9595 netdev_name_node_free(dev->name_node);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07009596 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009597}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07009598EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009599
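/*
 * Example (hypothetical, names made up): registering from a context that
 * already holds RTNL, such as an rtnl_link_ops->newlink() implementation.
 * Callers that do not hold RTNL should use register_netdev() below instead.
 *
 *	static int my_newlink(struct net *src_net, struct net_device *dev,
 *			      struct nlattr *tb[], struct nlattr *data[],
 *			      struct netlink_ext_ack *extack)
 *	{
 *		int err;
 *
 *		err = register_netdevice(dev);
 *		if (err)
 *			return err;
 *		netif_carrier_off(dev);
 *		return 0;
 *	}
 */
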
9600/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08009601 * init_dummy_netdev - init a dummy network device for NAPI
9602 * @dev: device to init
9603 *
9604 * This takes a network device structure and initializes the minimum
9605 * number of fields so it can be used to schedule NAPI polls without
9606 * registering a full blown interface. This is to be used by drivers
9607 * that need to tie several hardware interfaces to a single NAPI
9608 * poll scheduler due to HW limitations.
9609 */
9610int init_dummy_netdev(struct net_device *dev)
9611{
9612 /* Clear everything. Note we don't initialize spinlocks
9613	 * as they aren't supposed to be taken by any of the
9614 * NAPI code and this dummy netdev is supposed to be
9615 * only ever used for NAPI polls
9616 */
9617 memset(dev, 0, sizeof(struct net_device));
9618
9619 /* make sure we BUG if trying to hit standard
9620 * register/unregister code path
9621 */
9622 dev->reg_state = NETREG_DUMMY;
9623
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08009624 /* NAPI wants this */
9625 INIT_LIST_HEAD(&dev->napi_list);
9626
9627 /* a dummy interface is started by default */
9628 set_bit(__LINK_STATE_PRESENT, &dev->state);
9629 set_bit(__LINK_STATE_START, &dev->state);
9630
Josh Elsasser35edfdc2019-01-26 14:38:33 -08009631 /* napi_busy_loop stats accounting wants this */
9632 dev_net_set(dev, &init_net);
9633
Eric Dumazet29b44332010-10-11 10:22:12 +00009634	/* Note: We don't allocate pcpu_refcnt for dummy devices,
9635	 * because users of this 'device' don't need to change
9636 * its refcount.
9637 */
9638
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08009639 return 0;
9640}
9641EXPORT_SYMBOL_GPL(init_dummy_netdev);
9642
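/*
 * Example (hypothetical driver, names made up): a dummy netdev embedded in a
 * driver's private structure purely to host NAPI instances when the hardware
 * has no natural net_device to attach them to.
 *
 *	struct my_hw {
 *		struct net_device napi_dev;
 *		struct napi_struct napi;
 *	};
 *
 *	static void my_hw_setup_napi(struct my_hw *hw)
 *	{
 *		init_dummy_netdev(&hw->napi_dev);
 *		netif_napi_add(&hw->napi_dev, &hw->napi, my_poll,
 *			       NAPI_POLL_WEIGHT);
 *		napi_enable(&hw->napi);
 *	}
 */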
9643
9644/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009645 * register_netdev - register a network device
9646 * @dev: device to register
9647 *
9648 * Take a completed network device structure and add it to the kernel
9649 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
9650 * chain. 0 is returned on success. A negative errno code is returned
9651 * on a failure to set up the device, or if the name is a duplicate.
9652 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07009653 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07009654 * and expands the device name if you passed a format string to
9655 * alloc_netdev.
9656 */
9657int register_netdev(struct net_device *dev)
9658{
9659 int err;
9660
Kirill Tkhaib0f3deb2018-03-14 22:17:28 +03009661 if (rtnl_lock_killable())
9662 return -EINTR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009663 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009664 rtnl_unlock();
9665 return err;
9666}
9667EXPORT_SYMBOL(register_netdev);
9668
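/*
 * Example (hypothetical probe path, names made up): registration is usually
 * the last step of probe, because the stack may start using the device as
 * soon as this returns.
 *
 *	err = register_netdev(netdev);
 *	if (err) {
 *		dev_err(dev, "failed to register netdev: %d\n", err);
 *		goto err_free_rings;
 *	}
 *	netif_carrier_off(netdev);
 */
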
Eric Dumazet29b44332010-10-11 10:22:12 +00009669int netdev_refcnt_read(const struct net_device *dev)
9670{
9671 int i, refcnt = 0;
9672
9673 for_each_possible_cpu(i)
9674 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
9675 return refcnt;
9676}
9677EXPORT_SYMBOL(netdev_refcnt_read);
9678
Ben Hutchings2c530402012-07-10 10:55:09 +00009679/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009680 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00009681 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07009682 *
9683 * This is called when unregistering network devices.
9684 *
9685 * Any protocol or device that holds a reference should register
9686 * for netdevice notification, and cleanup and put back the
9687 * reference if they receive an UNREGISTER event.
9688 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09009689 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009690 */
9691static void netdev_wait_allrefs(struct net_device *dev)
9692{
9693 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00009694 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009695
Eric Dumazete014deb2009-11-17 05:59:21 +00009696 linkwatch_forget_dev(dev);
9697
Linus Torvalds1da177e2005-04-16 15:20:36 -07009698 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00009699 refcnt = netdev_refcnt_read(dev);
9700
9701 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009702 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08009703 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009704
9705 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07009706 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009707
Eric Dumazet748e2d92012-08-22 21:50:59 +00009708 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00009709 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00009710 rtnl_lock();
9711
Linus Torvalds1da177e2005-04-16 15:20:36 -07009712 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
9713 &dev->state)) {
9714 /* We must not have linkwatch events
9715 * pending on unregister. If this
9716 * happens, we simply run the queue
9717 * unscheduled, resulting in a noop
9718 * for this device.
9719 */
9720 linkwatch_run_queue();
9721 }
9722
Stephen Hemminger6756ae42006-03-20 22:23:58 -08009723 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009724
9725 rebroadcast_time = jiffies;
9726 }
9727
9728 msleep(250);
9729
Eric Dumazet29b44332010-10-11 10:22:12 +00009730 refcnt = netdev_refcnt_read(dev);
9731
Eric Dumazetd7c04b02019-05-16 08:09:57 -07009732 if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00009733 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
9734 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009735 warning_time = jiffies;
9736 }
9737 }
9738}
9739
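/*
 * Sketch (hypothetical subsystem, helper names made up): the pattern the
 * netdev_wait_allrefs() comment above asks of reference holders - drop the
 * reference from a NETDEV_UNREGISTER notifier so the wait can finish.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UNREGISTER && my_cache_holds(dev)) {
 *			my_cache_forget(dev);
 *			dev_put(dev);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */
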
9740/* The sequence is:
9741 *
9742 * rtnl_lock();
9743 * ...
9744 * register_netdevice(x1);
9745 * register_netdevice(x2);
9746 * ...
9747 * unregister_netdevice(y1);
9748 * unregister_netdevice(y2);
9749 * ...
9750 * rtnl_unlock();
9751 * free_netdev(y1);
9752 * free_netdev(y2);
9753 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07009754 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07009755 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07009756 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07009757 * without deadlocking with linkwatch via keventd.
9758 * 2) Since we run with the RTNL semaphore not held, we can sleep
9759 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07009760 *
9761 * We must not return until all unregister events added during
9762 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009763 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009764void netdev_run_todo(void)
9765{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07009766 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009767
Linus Torvalds1da177e2005-04-16 15:20:36 -07009768 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07009769 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07009770
9771 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07009772
Eric Dumazet0115e8e2012-08-22 17:19:46 +00009773
9774 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00009775 if (!list_empty(&list))
9776 rcu_barrier();
9777
Linus Torvalds1da177e2005-04-16 15:20:36 -07009778 while (!list_empty(&list)) {
9779 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00009780 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009781 list_del(&dev->todo_list);
9782
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07009783 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00009784 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07009785 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07009786 dump_stack();
9787 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009788 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07009789
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07009790 dev->reg_state = NETREG_UNREGISTERED;
9791
9792 netdev_wait_allrefs(dev);
9793
9794 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00009795 BUG_ON(netdev_refcnt_read(dev));
Salam Noureddine7866a622015-01-27 11:35:48 -08009796 BUG_ON(!list_empty(&dev->ptype_all));
9797 BUG_ON(!list_empty(&dev->ptype_specific));
Eric Dumazet33d480c2011-08-11 19:30:52 +00009798 WARN_ON(rcu_access_pointer(dev->ip_ptr));
9799 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
David Ahern330c7272018-02-13 08:52:00 -08009800#if IS_ENABLED(CONFIG_DECNET)
Ilpo Järvinen547b7922008-07-25 21:43:18 -07009801 WARN_ON(dev->dn_ptr);
David Ahern330c7272018-02-13 08:52:00 -08009802#endif
David S. Millercf124db2017-05-08 12:52:56 -04009803 if (dev->priv_destructor)
9804 dev->priv_destructor(dev);
9805 if (dev->needs_free_netdev)
9806 free_netdev(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07009807
Eric W. Biederman50624c92013-09-23 21:19:49 -07009808 /* Report a network device has been unregistered */
9809 rtnl_lock();
9810 dev_net(dev)->dev_unreg_count--;
9811 __rtnl_unlock();
9812 wake_up(&netdev_unregistering_wq);
9813
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07009814 /* Free network device */
9815 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009816 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009817}
9818
Jarod Wilson92566452016-02-01 18:51:04 -05009819/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
9820 * all the same fields in the same order as net_device_stats, with only
9821 * the type differing, but rtnl_link_stats64 may have additional fields
9822 * at the end for newer counters.
Ben Hutchings3cfde792010-07-09 09:11:52 +00009823 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00009824void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
9825 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00009826{
9827#if BITS_PER_LONG == 64
Jarod Wilson92566452016-02-01 18:51:04 -05009828 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
Alban Browaeys9af99592017-07-03 03:20:13 +02009829 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
Jarod Wilson92566452016-02-01 18:51:04 -05009830 /* zero out counters that only exist in rtnl_link_stats64 */
9831 memset((char *)stats64 + sizeof(*netdev_stats), 0,
9832 sizeof(*stats64) - sizeof(*netdev_stats));
Ben Hutchings3cfde792010-07-09 09:11:52 +00009833#else
Jarod Wilson92566452016-02-01 18:51:04 -05009834 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
Ben Hutchings3cfde792010-07-09 09:11:52 +00009835 const unsigned long *src = (const unsigned long *)netdev_stats;
9836 u64 *dst = (u64 *)stats64;
9837
Jarod Wilson92566452016-02-01 18:51:04 -05009838 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00009839 for (i = 0; i < n; i++)
9840 dst[i] = src[i];
Jarod Wilson92566452016-02-01 18:51:04 -05009841 /* zero out counters that only exist in rtnl_link_stats64 */
9842 memset((char *)stats64 + n * sizeof(u64), 0,
9843 sizeof(*stats64) - n * sizeof(u64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00009844#endif
9845}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00009846EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00009847
Eric Dumazetd83345a2009-11-16 03:36:51 +00009848/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08009849 * dev_get_stats - get network device statistics
9850 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07009851 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08009852 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00009853 * Get network statistics from device. Return @storage.
9854 * The device driver may provide its own method by setting
9855 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
9856 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08009857 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00009858struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
9859 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00009860{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08009861 const struct net_device_ops *ops = dev->netdev_ops;
9862
Eric Dumazet28172732010-07-07 14:58:56 -07009863 if (ops->ndo_get_stats64) {
9864 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00009865 ops->ndo_get_stats64(dev, storage);
9866 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00009867 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00009868 } else {
9869 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07009870 }
Eric Dumazet6f64ec72017-06-27 07:02:20 -07009871 storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
9872 storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
9873 storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
Eric Dumazet28172732010-07-07 14:58:56 -07009874 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07009875}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08009876EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07009877
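/*
 * Example (hypothetical in-kernel reader): the storage is caller-provided,
 * so a snapshot can simply live on the stack.
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu RX packets, %llu TX packets\n",
 *		dev->name, stats.rx_packets, stats.tx_packets);
 */
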
Eric Dumazet24824a02010-10-02 06:11:55 +00009878struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07009879{
Eric Dumazet24824a02010-10-02 06:11:55 +00009880 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07009881
Eric Dumazet24824a02010-10-02 06:11:55 +00009882#ifdef CONFIG_NET_CLS_ACT
9883 if (queue)
9884 return queue;
9885 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
9886 if (!queue)
9887 return NULL;
9888 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet2ce1ee12015-02-04 13:37:44 -08009889 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
Eric Dumazet24824a02010-10-02 06:11:55 +00009890 queue->qdisc_sleeping = &noop_qdisc;
9891 rcu_assign_pointer(dev->ingress_queue, queue);
9892#endif
9893 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07009894}
9895
Eric Dumazet2c60db02012-09-16 09:17:26 +00009896static const struct ethtool_ops default_ethtool_ops;
9897
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00009898void netdev_set_default_ethtool_ops(struct net_device *dev,
9899 const struct ethtool_ops *ops)
9900{
9901 if (dev->ethtool_ops == &default_ethtool_ops)
9902 dev->ethtool_ops = ops;
9903}
9904EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
9905
Eric Dumazet74d332c2013-10-30 13:10:44 -07009906void netdev_freemem(struct net_device *dev)
9907{
9908 char *addr = (char *)dev - dev->padded;
9909
WANG Cong4cb28972014-06-02 15:55:22 -07009910 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07009911}
9912
Linus Torvalds1da177e2005-04-16 15:20:36 -07009913/**
tcharding722c9a02017-02-09 17:56:04 +11009914 * alloc_netdev_mqs - allocate network device
9915 * @sizeof_priv: size of private data to allocate space for
9916 * @name: device name format string
9917 * @name_assign_type: origin of device name
9918 * @setup: callback to initialize device
9919 * @txqs: the number of TX subqueues to allocate
9920 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07009921 *
tcharding722c9a02017-02-09 17:56:04 +11009922 * Allocates a struct net_device with private data area for driver use
9923 * and performs basic initialization. Also allocates subqueue structs
9924 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009925 */
Tom Herbert36909ea2011-01-09 19:36:31 +00009926struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02009927 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00009928 void (*setup)(struct net_device *),
9929 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009930{
Linus Torvalds1da177e2005-04-16 15:20:36 -07009931 struct net_device *dev;
Alexey Dobriyan52a59bd2017-09-21 23:33:29 +03009932 unsigned int alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00009933 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009934
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07009935 BUG_ON(strlen(name) >= sizeof(dev->name));
9936
Tom Herbert36909ea2011-01-09 19:36:31 +00009937 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00009938 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00009939 return NULL;
9940 }
9941
Tom Herbert36909ea2011-01-09 19:36:31 +00009942 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00009943 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00009944 return NULL;
9945 }
Tom Herbert36909ea2011-01-09 19:36:31 +00009946
David S. Millerfd2ea0a2008-07-17 01:56:23 -07009947 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07009948 if (sizeof_priv) {
9949 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00009950 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07009951 alloc_size += sizeof_priv;
9952 }
9953 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00009954 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009955
Michal Hockodcda9b02017-07-12 14:36:45 -07009956 p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
Joe Perches62b59422013-02-04 16:48:16 +00009957 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009958 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009959
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00009960 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009961 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00009962
Eric Dumazet29b44332010-10-11 10:22:12 +00009963 dev->pcpu_refcnt = alloc_percpu(int);
9964 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07009965 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00009966
Linus Torvalds1da177e2005-04-16 15:20:36 -07009967 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00009968 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009969
Jiri Pirko22bedad32010-04-01 21:22:57 +00009970 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00009971 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00009972
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09009973 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009974
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07009975 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00009976 dev->gso_max_segs = GSO_MAX_SEGS;
Taehee Yoo5343da42019-10-21 18:47:50 +00009977 dev->upper_level = 1;
9978 dev->lower_level = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009979
Herbert Xud565b0a2008-12-15 23:38:52 -08009980 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00009981 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07009982 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00009983 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02009984 INIT_LIST_HEAD(&dev->adj_list.upper);
9985 INIT_LIST_HEAD(&dev->adj_list.lower);
Salam Noureddine7866a622015-01-27 11:35:48 -08009986 INIT_LIST_HEAD(&dev->ptype_all);
9987 INIT_LIST_HEAD(&dev->ptype_specific);
Jiri Pirko93642e12020-01-25 12:17:08 +01009988 INIT_LIST_HEAD(&dev->net_notifier_list);
Jiri Kosina59cc1f62016-08-10 11:05:15 +02009989#ifdef CONFIG_NET_SCHED
9990 hash_init(dev->qdisc_hash);
9991#endif
Eric Dumazet02875872014-10-05 18:38:35 -07009992 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009993 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08009994
Phil Suttera8131042016-02-17 15:37:43 +01009995 if (!dev->tx_queue_len) {
Phil Sutterf84bb1e2015-08-27 21:21:36 +02009996 dev->priv_flags |= IFF_NO_QUEUE;
Jesper Dangaard Brouer11597082016-11-03 14:56:06 +01009997 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
Phil Suttera8131042016-02-17 15:37:43 +01009998 }
Phil Sutter906470c2015-08-18 10:30:48 +02009999
David S. Miller8d3bdbd2011-02-08 15:02:50 -080010000 dev->num_tx_queues = txqs;
10001 dev->real_num_tx_queues = txqs;
10002 if (netif_alloc_netdev_queues(dev))
10003 goto free_all;
10004
David S. Miller8d3bdbd2011-02-08 15:02:50 -080010005 dev->num_rx_queues = rxqs;
10006 dev->real_num_rx_queues = rxqs;
10007 if (netif_alloc_rx_queues(dev))
10008 goto free_all;
David S. Miller8d3bdbd2011-02-08 15:02:50 -080010009
Linus Torvalds1da177e2005-04-16 15:20:36 -070010010 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +020010011 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +000010012 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +000010013 if (!dev->ethtool_ops)
10014 dev->ethtool_ops = &default_ethtool_ops;
Pablo Neirae687ad62015-05-13 18:19:38 +020010015
Daniel Borkmann357b6cc2020-03-18 10:33:22 +010010016 nf_hook_ingress_init(dev);
Pablo Neirae687ad62015-05-13 18:19:38 +020010017
Linus Torvalds1da177e2005-04-16 15:20:36 -070010018 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +000010019
David S. Miller8d3bdbd2011-02-08 15:02:50 -080010020free_all:
10021 free_netdev(dev);
10022 return NULL;
10023
Eric Dumazet29b44332010-10-11 10:22:12 +000010024free_pcpu:
10025 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -070010026free_dev:
10027 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +000010028 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010029}
Tom Herbert36909ea2011-01-09 19:36:31 +000010030EXPORT_SYMBOL(alloc_netdev_mqs);
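
/*
 * Illustrative sketch, not used by dev.c itself: how a hypothetical driver
 * might pair alloc_netdev_mqs() with register_netdev()/free_netdev().
 * "struct example_priv", "example_setup", the "ex%d" name and the queue
 * counts are assumptions made up for this sketch; ether_setup() is assumed
 * to be available via <linux/etherdevice.h>.
 */
struct example_priv {
	unsigned long example_flags;
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
}

static int __maybe_unused example_create(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev_mqs(sizeof(struct example_priv), "ex%d",
			       NET_NAME_ENUM, example_setup, 4, 4);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);	/* also the correct cleanup on failure */
	return err;
}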
Linus Torvalds1da177e2005-04-16 15:20:36 -070010031
10032/**
tcharding722c9a02017-02-09 17:56:04 +110010033 * free_netdev - free network device
10034 * @dev: device
Linus Torvalds1da177e2005-04-16 15:20:36 -070010035 *
tcharding722c9a02017-02-09 17:56:04 +110010036 * This function does the last stage of destroying an allocated device
10037 * interface. The reference to the device object is released. If this
10038 * is the last reference then it will be freed. Must be called in process
10039 * context.
Linus Torvalds1da177e2005-04-16 15:20:36 -070010040 */
10041void free_netdev(struct net_device *dev)
10042{
Herbert Xud565b0a2008-12-15 23:38:52 -080010043 struct napi_struct *p, *n;
10044
Eric Dumazet93d05d42015-11-18 06:31:03 -080010045 might_sleep();
Eric Dumazet60877a32013-06-20 01:15:51 -070010046 netif_free_tx_queues(dev);
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +010010047 netif_free_rx_queues(dev);
David S. Millere8a04642008-07-17 00:34:19 -070010048
Eric Dumazet33d480c2011-08-11 19:30:52 +000010049 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +000010050
Jiri Pirkof001fde2009-05-05 02:48:28 +000010051 /* Flush device addresses */
10052 dev_addr_flush(dev);
10053
Herbert Xud565b0a2008-12-15 23:38:52 -080010054 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10055 netif_napi_del(p);
10056
Eric Dumazet29b44332010-10-11 10:22:12 +000010057 free_percpu(dev->pcpu_refcnt);
10058 dev->pcpu_refcnt = NULL;
Toke Høiland-Jørgensen75ccae62020-01-16 16:14:44 +010010059 free_percpu(dev->xdp_bulkq);
10060 dev->xdp_bulkq = NULL;
Eric Dumazet29b44332010-10-11 10:22:12 +000010061
Stephen Hemminger3041a062006-05-26 13:25:24 -070010062 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -070010063 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -070010064 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010065 return;
10066 }
10067
10068 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10069 dev->reg_state = NETREG_RELEASED;
10070
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -070010071 /* will free via device release */
10072 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010073}
Eric Dumazetd1b19df2009-09-03 01:29:39 -070010074EXPORT_SYMBOL(free_netdev);
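
/*
 * Illustrative sketch of the usual teardown ordering in a hypothetical
 * driver remove path (not part of dev.c): unregister first, then drop the
 * final reference with free_netdev() from process context.
 */
static void __maybe_unused example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes and releases the rtnl lock */
	free_netdev(dev);	/* may sleep; frees once the last ref is gone */
}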
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +090010075
Stephen Hemmingerf0db2752008-09-30 02:23:58 -070010076/**
10077 * synchronize_net - Synchronize with packet receive processing
10078 *
10079 * Wait for packets currently being received to be done.
10080 * Does not block later packets from starting.
10081 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +090010082void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010083{
10084 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +000010085 if (rtnl_is_locked())
10086 synchronize_rcu_expedited();
10087 else
10088 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -070010089}
Eric Dumazetd1b19df2009-09-03 01:29:39 -070010090EXPORT_SYMBOL(synchronize_net);
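
/*
 * Illustrative sketch (hypothetical caller, not part of dev.c): a common
 * use of synchronize_net() is to wait out in-flight receive paths after
 * unpublishing an RCU-protected object and before freeing it. The
 * "example_hook" structure and pointer are assumptions for this sketch.
 */
struct example_hook {
	void (*func)(struct sk_buff *skb);
};

static struct example_hook __rcu *example_hook_ptr;

static void __maybe_unused example_remove_hook(void)
{
	struct example_hook *hook;

	hook = rtnl_dereference(example_hook_ptr);	/* caller holds RTNL */
	RCU_INIT_POINTER(example_hook_ptr, NULL);
	synchronize_net();	/* no receive path can still be using 'hook' */
	kfree(hook);
}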
Linus Torvalds1da177e2005-04-16 15:20:36 -070010091
10092/**
Eric Dumazet44a08732009-10-27 07:03:04 +000010093 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -070010094 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +000010095 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -080010096 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070010097 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -080010098 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +000010099 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -070010100 *
10101 * Callers must hold the rtnl semaphore. You may want
10102 * unregister_netdev() instead of this.
10103 */
10104
Eric Dumazet44a08732009-10-27 07:03:04 +000010105void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010106{
Herbert Xua6620712007-12-12 19:21:56 -080010107 ASSERT_RTNL();
10108
Eric Dumazet44a08732009-10-27 07:03:04 +000010109 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +000010110 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +000010111 } else {
10112 rollback_registered(dev);
10113 /* Finish processing unregister after unlock */
10114 net_set_todo(dev);
10115 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010116}
Eric Dumazet44a08732009-10-27 07:03:04 +000010117EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010118
10119/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +000010120 * unregister_netdevice_many - unregister many devices
10121 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -070010122 *
10123 * Note: As most callers use a stack allocated list_head,
10124 * we force a list_del() to make sure stack wont be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +000010125 */
10126void unregister_netdevice_many(struct list_head *head)
10127{
10128 struct net_device *dev;
10129
10130 if (!list_empty(head)) {
10131 rollback_registered_many(head);
10132 list_for_each_entry(dev, head, unreg_list)
10133 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -070010134 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +000010135 }
10136}
Eric Dumazet63c80992009-10-27 07:06:49 +000010137EXPORT_SYMBOL(unregister_netdevice_many);
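
/*
 * Illustrative sketch (hypothetical caller, not part of dev.c): queueing
 * several devices with unregister_netdevice_queue() and tearing them down
 * in a single batch, the way rtnl_link ->dellink() style code typically
 * does. Matching on dev->group here is just an example criterion.
 */
static void __maybe_unused example_unregister_group(struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(kill_list);

	ASSERT_RTNL();
	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group)
			unregister_netdevice_queue(dev, &kill_list);
	}
	unregister_netdevice_many(&kill_list);
}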
Eric Dumazet9b5e3832009-10-27 07:04:19 +000010138
10139/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010140 * unregister_netdev - remove device from the kernel
10141 * @dev: device
10142 *
10143 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -080010144 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -070010145 *
10146 * This is just a wrapper for unregister_netdevice that takes
10147 * the rtnl semaphore. In general you want to use this and not
10148 * unregister_netdevice.
10149 */
10150void unregister_netdev(struct net_device *dev)
10151{
10152 rtnl_lock();
10153 unregister_netdevice(dev);
10154 rtnl_unlock();
10155}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010156EXPORT_SYMBOL(unregister_netdev);
10157
Eric W. Biedermance286d32007-09-12 13:53:49 +020010158/**
10159 * dev_change_net_namespace - move device to a different network namespace
10160 * @dev: device
10161 * @net: network namespace
10162 * @pat: If not NULL, name pattern to try if the current device name
10163 * is already taken in the destination network namespace.
10164 *
10165 * This function shuts down a device interface and moves it
10166 * to a new network namespace. On success 0 is returned, on
10167 * a failure a negative errno code is returned.
10168 *
10169 * Callers must hold the rtnl semaphore.
10170 */
10171
10172int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
10173{
Christian Brauneref6a4c82020-02-27 04:37:19 +010010174 struct net *net_old = dev_net(dev);
Nicolas Dichtel38e01b32018-01-25 15:01:39 +010010175 int err, new_nsid, new_ifindex;
Eric W. Biedermance286d32007-09-12 13:53:49 +020010176
10177 ASSERT_RTNL();
10178
10179 /* Don't allow namespace local devices to be moved. */
10180 err = -EINVAL;
10181 if (dev->features & NETIF_F_NETNS_LOCAL)
10182 goto out;
10183
10184 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +020010185 if (dev->reg_state != NETREG_REGISTERED)
10186 goto out;
10187
10188 /* Get out if there is nothing to do */
10189 err = 0;
Christian Brauneref6a4c82020-02-27 04:37:19 +010010190 if (net_eq(net_old, net))
Eric W. Biedermance286d32007-09-12 13:53:49 +020010191 goto out;
10192
10193 /* Pick the destination device name, and ensure
10194 * we can use it in the destination network namespace.
10195 */
10196 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +000010197 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +020010198 /* We get here if we can't use the current device name */
10199 if (!pat)
10200 goto out;
Li RongQing7892bd02018-06-19 17:23:17 +080010201 err = dev_get_valid_name(net, dev, pat);
10202 if (err < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +020010203 goto out;
10204 }
10205
10206 /*
10207 * And now a mini version of register_netdevice and unregister_netdevice.
10208 */
10209
10210 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -070010211 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +020010212
10213 /* And unlink it from device chain */
Eric W. Biedermance286d32007-09-12 13:53:49 +020010214 unlist_netdevice(dev);
10215
10216 synchronize_net();
10217
10218 /* Shutdown queueing discipline. */
10219 dev_shutdown(dev);
10220
10221 /* Notify protocols that we are about to destroy
tchardingeb13da12017-02-09 17:56:06 +110010222 * this device. They should clean all the things.
10223 *
10224 * Note that dev->reg_state stays at NETREG_REGISTERED.
10225 * This is wanted because this way 8021q and macvlan know
10226 * the device is just moving and can keep their slaves up.
10227 */
Eric W. Biedermance286d32007-09-12 13:53:49 +020010228 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +000010229 rcu_barrier();
Nicolas Dichtel38e01b32018-01-25 15:01:39 +010010230
Guillaume Naultd4e4fdf2019-10-23 18:39:04 +020010231 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
Nicolas Dichtel38e01b32018-01-25 15:01:39 +010010232 /* If there is an ifindex conflict assign a new one */
10233 if (__dev_get_by_index(net, dev->ifindex))
10234 new_ifindex = dev_new_index(net);
10235 else
10236 new_ifindex = dev->ifindex;
10237
10238 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
10239 new_ifindex);
Eric W. Biedermance286d32007-09-12 13:53:49 +020010240
10241 /*
10242 * Flush the unicast and multicast chains
10243 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +000010244 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +000010245 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +020010246
Serge Hallyn4e66ae22012-12-03 16:17:12 +000010247 /* Send a netdev-removed uevent to the old namespace */
10248 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +040010249 netdev_adjacent_del_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +000010250
Jiri Pirko93642e12020-01-25 12:17:08 +010010251 /* Move per-net netdevice notifiers that are following the netdevice */
10252 move_netdevice_notifiers_dev_net(dev, net);
10253
Eric W. Biedermance286d32007-09-12 13:53:49 +020010254 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +090010255 dev_net_set(dev, net);
Nicolas Dichtel38e01b32018-01-25 15:01:39 +010010256 dev->ifindex = new_ifindex;
Eric W. Biedermance286d32007-09-12 13:53:49 +020010257
Serge Hallyn4e66ae22012-12-03 16:17:12 +000010258 /* Send a netdev-add uevent to the new namespace */
10259 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
Alexander Y. Fomichev4c754312014-08-25 16:26:45 +040010260 netdev_adjacent_add_links(dev);
Serge Hallyn4e66ae22012-12-03 16:17:12 +000010261
Eric W. Biederman8b41d182007-09-26 22:02:53 -070010262 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -070010263 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -070010264 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +020010265
Christian Brauneref6a4c82020-02-27 04:37:19 +010010266 /* Adapt owner in case owning user namespace of target network
10267 * namespace is different from the original one.
10268 */
10269 err = netdev_change_owner(dev, net_old, net);
10270 WARN_ON(err);
10271
Eric W. Biedermance286d32007-09-12 13:53:49 +020010272 /* Add the device back in the hashes */
10273 list_netdevice(dev);
10274
10275 /* Notify protocols, that a new device appeared. */
10276 call_netdevice_notifiers(NETDEV_REGISTER, dev);
10277
Eric W. Biedermand90a9092009-12-12 22:11:15 +000010278 /*
10279 * Prevent userspace races by waiting until the network
10280 * device is fully set up before sending notifications.
10281 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -070010282 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +000010283
Eric W. Biedermance286d32007-09-12 13:53:49 +020010284 synchronize_net();
10285 err = 0;
10286out:
10287 return err;
10288}
Johannes Berg463d0182009-07-14 00:33:35 +020010289EXPORT_SYMBOL_GPL(dev_change_net_namespace);
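
/*
 * Illustrative sketch (hypothetical caller, not part of dev.c): moving a
 * device into another network namespace under RTNL. The "dev%d" pattern is
 * only used if the current name is already taken in the target namespace.
 */
static int __maybe_unused example_move_to_netns(struct net_device *dev,
						struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "dev%d");
	rtnl_unlock();

	return err;
}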
Eric W. Biedermance286d32007-09-12 13:53:49 +020010290
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +010010291static int dev_cpu_dead(unsigned int oldcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010292{
10293 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010294 struct sk_buff *skb;
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +010010295 unsigned int cpu;
Ashwanth Goli97d8b6e2017-06-13 16:54:55 +053010296 struct softnet_data *sd, *oldsd, *remsd = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010297
Linus Torvalds1da177e2005-04-16 15:20:36 -070010298 local_irq_disable();
10299 cpu = smp_processor_id();
10300 sd = &per_cpu(softnet_data, cpu);
10301 oldsd = &per_cpu(softnet_data, oldcpu);
10302
10303 /* Find end of our completion_queue. */
10304 list_skb = &sd->completion_queue;
10305 while (*list_skb)
10306 list_skb = &(*list_skb)->next;
10307 /* Append completion queue from offline CPU. */
10308 *list_skb = oldsd->completion_queue;
10309 oldsd->completion_queue = NULL;
10310
Linus Torvalds1da177e2005-04-16 15:20:36 -070010311 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +000010312 if (oldsd->output_queue) {
10313 *sd->output_queue_tailp = oldsd->output_queue;
10314 sd->output_queue_tailp = oldsd->output_queue_tailp;
10315 oldsd->output_queue = NULL;
10316 oldsd->output_queue_tailp = &oldsd->output_queue;
10317 }
Eric Dumazetac64da02015-01-15 17:04:22 -080010318 /* Append NAPI poll list from offline CPU, with one exception:
10319 * process_backlog() must be called by the CPU owning the percpu backlog.
10320 * We properly handle process_queue & input_pkt_queue later.
10321 */
10322 while (!list_empty(&oldsd->poll_list)) {
10323 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
10324 struct napi_struct,
10325 poll_list);
10326
10327 list_del_init(&napi->poll_list);
10328 if (napi->poll == process_backlog)
10329 napi->state = 0;
10330 else
10331 ____napi_schedule(sd, napi);
Heiko Carstens264524d2011-06-06 20:50:03 +000010332 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010333
10334 raise_softirq_irqoff(NET_TX_SOFTIRQ);
10335 local_irq_enable();
10336
ashwanth@codeaurora.org773fc8f2017-06-09 14:24:58 +053010337#ifdef CONFIG_RPS
10338 remsd = oldsd->rps_ipi_list;
10339 oldsd->rps_ipi_list = NULL;
10340#endif
10341 /* send out pending IPIs on offline CPU */
10342 net_rps_send_ipi(remsd);
10343
Linus Torvalds1da177e2005-04-16 15:20:36 -070010344 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +000010345 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -080010346 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +000010347 input_queue_head_incr(oldsd);
10348 }
Eric Dumazetac64da02015-01-15 17:04:22 -080010349 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
Eric Dumazet91e83132015-02-05 14:58:14 -080010350 netif_rx_ni(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +000010351 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -070010352 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010353
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +010010354 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010355}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010356
Herbert Xu7f353bf2007-08-10 15:47:58 -070010357/**
Herbert Xub63365a2008-10-23 01:11:29 -070010358 * netdev_increment_features - increment feature set by one
10359 * @all: current feature set
10360 * @one: new feature set
10361 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -070010362 *
10363 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -070010364 * @one to the master device with current feature set @all. Will not
10365 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -070010366 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +000010367netdev_features_t netdev_increment_features(netdev_features_t all,
10368 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -070010369{
Tom Herbertc8cd0982015-12-14 11:19:44 -080010370 if (mask & NETIF_F_HW_CSUM)
Tom Herberta1882222015-12-14 11:19:43 -080010371 mask |= NETIF_F_CSUM_MASK;
Michał Mirosław1742f182011-04-22 06:31:16 +000010372 mask |= NETIF_F_VLAN_CHALLENGED;
10373
Tom Herberta1882222015-12-14 11:19:43 -080010374 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
Michał Mirosław1742f182011-04-22 06:31:16 +000010375 all &= one | ~NETIF_F_ALL_FOR_ALL;
10376
Michał Mirosław1742f182011-04-22 06:31:16 +000010377 /* If one device supports hw checksumming, set for all. */
Tom Herbertc8cd0982015-12-14 11:19:44 -080010378 if (all & NETIF_F_HW_CSUM)
10379 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -070010380
10381 return all;
10382}
Herbert Xub63365a2008-10-23 01:11:29 -070010383EXPORT_SYMBOL(netdev_increment_features);
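
/*
 * Illustrative sketch (hypothetical aggregation driver, not part of dev.c):
 * folding the feature sets of all lower devices into a master's feature
 * set, the way bonding/team style drivers use netdev_increment_features().
 * Using master->features as the mask is a simplification for this sketch;
 * real drivers usually pass a driver-specific supported-feature mask.
 */
static netdev_features_t __maybe_unused
example_compute_features(struct net_device *master)
{
	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(master, lower, iter)
		features = netdev_increment_features(features,
						     lower->features,
						     master->features);
	return features;
}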
Herbert Xu7f353bf2007-08-10 15:47:58 -070010384
Baruch Siach430f03c2013-06-02 20:43:55 +000010385static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -070010386{
10387 int i;
10388 struct hlist_head *hash;
10389
Kees Cook6da2ec52018-06-12 13:55:00 -070010390 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
Pavel Emelyanov30d97d32007-09-16 15:40:33 -070010391 if (hash != NULL)
10392 for (i = 0; i < NETDEV_HASHENTRIES; i++)
10393 INIT_HLIST_HEAD(&hash[i]);
10394
10395 return hash;
10396}
10397
Eric W. Biederman881d9662007-09-17 11:56:21 -070010398/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -070010399static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -070010400{
Li RongQingd9f37d02018-07-13 14:41:36 +080010401 BUILD_BUG_ON(GRO_HASH_BUCKETS >
Pankaj Bharadiyac5936422019-12-09 10:31:43 -080010402 8 * sizeof_field(struct napi_struct, gro_bitmask));
Li RongQingd9f37d02018-07-13 14:41:36 +080010403
Rustad, Mark D734b6542012-07-18 09:06:07 +000010404 if (net != &init_net)
10405 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -070010406
Pavel Emelyanov30d97d32007-09-16 15:40:33 -070010407 net->dev_name_head = netdev_create_hash();
10408 if (net->dev_name_head == NULL)
10409 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -070010410
Pavel Emelyanov30d97d32007-09-16 15:40:33 -070010411 net->dev_index_head = netdev_create_hash();
10412 if (net->dev_index_head == NULL)
10413 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -070010414
Jiri Pirkoa30c7b42019-09-30 10:15:10 +020010415 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
10416
Eric W. Biederman881d9662007-09-17 11:56:21 -070010417 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -070010418
10419err_idx:
10420 kfree(net->dev_name_head);
10421err_name:
10422 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -070010423}
10424
Stephen Hemmingerf0db2752008-09-30 02:23:58 -070010425/**
10426 * netdev_drivername - network driver for the device
10427 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -070010428 *
10429 * Determine network driver for device.
10430 */
David S. Miller3019de12011-06-06 16:41:33 -070010431const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -070010432{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -070010433 const struct device_driver *driver;
10434 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -070010435 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -070010436
10437 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -070010438 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -070010439 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -070010440
10441 driver = parent->driver;
10442 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -070010443 return driver->name;
10444 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -070010445}
10446
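/*
 * Illustrative sketch (hypothetical diagnostic helper, not part of dev.c):
 * netdev_drivername() never returns NULL, so the result can be used
 * directly in a format string.
 */
static void __maybe_unused example_warn_stall(struct net_device *dev)
{
	pr_warn("%s (%s): transmit queue appears stuck\n",
		dev->name, netdev_drivername(dev));
}
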
Joe Perches6ea754e2014-09-22 11:10:50 -070010447static void __netdev_printk(const char *level, const struct net_device *dev,
10448 struct va_format *vaf)
Joe Perches256df2f2010-06-27 01:02:35 +000010449{
Joe Perchesb004ff42012-09-12 20:12:19 -070010450 if (dev && dev->dev.parent) {
Joe Perches6ea754e2014-09-22 11:10:50 -070010451 dev_printk_emit(level[1] - '0',
10452 dev->dev.parent,
10453 "%s %s %s%s: %pV",
10454 dev_driver_string(dev->dev.parent),
10455 dev_name(dev->dev.parent),
10456 netdev_name(dev), netdev_reg_state(dev),
10457 vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -070010458 } else if (dev) {
Joe Perches6ea754e2014-09-22 11:10:50 -070010459 printk("%s%s%s: %pV",
10460 level, netdev_name(dev), netdev_reg_state(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -070010461 } else {
Joe Perches6ea754e2014-09-22 11:10:50 -070010462 printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -070010463 }
Joe Perches256df2f2010-06-27 01:02:35 +000010464}
10465
Joe Perches6ea754e2014-09-22 11:10:50 -070010466void netdev_printk(const char *level, const struct net_device *dev,
10467 const char *format, ...)
Joe Perches256df2f2010-06-27 01:02:35 +000010468{
10469 struct va_format vaf;
10470 va_list args;
Joe Perches256df2f2010-06-27 01:02:35 +000010471
10472 va_start(args, format);
10473
10474 vaf.fmt = format;
10475 vaf.va = &args;
10476
Joe Perches6ea754e2014-09-22 11:10:50 -070010477 __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -070010478
Joe Perches256df2f2010-06-27 01:02:35 +000010479 va_end(args);
Joe Perches256df2f2010-06-27 01:02:35 +000010480}
10481EXPORT_SYMBOL(netdev_printk);
10482
10483#define define_netdev_printk_level(func, level) \
Joe Perches6ea754e2014-09-22 11:10:50 -070010484void func(const struct net_device *dev, const char *fmt, ...) \
Joe Perches256df2f2010-06-27 01:02:35 +000010485{ \
Joe Perches256df2f2010-06-27 01:02:35 +000010486 struct va_format vaf; \
10487 va_list args; \
10488 \
10489 va_start(args, fmt); \
10490 \
10491 vaf.fmt = fmt; \
10492 vaf.va = &args; \
10493 \
Joe Perches6ea754e2014-09-22 11:10:50 -070010494 __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -070010495 \
Joe Perches256df2f2010-06-27 01:02:35 +000010496 va_end(args); \
Joe Perches256df2f2010-06-27 01:02:35 +000010497} \
10498EXPORT_SYMBOL(func);
10499
10500define_netdev_printk_level(netdev_emerg, KERN_EMERG);
10501define_netdev_printk_level(netdev_alert, KERN_ALERT);
10502define_netdev_printk_level(netdev_crit, KERN_CRIT);
10503define_netdev_printk_level(netdev_err, KERN_ERR);
10504define_netdev_printk_level(netdev_warn, KERN_WARNING);
10505define_netdev_printk_level(netdev_notice, KERN_NOTICE);
10506define_netdev_printk_level(netdev_info, KERN_INFO);
10507
Pavel Emelyanov46650792007-10-08 20:38:39 -070010508static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -070010509{
10510 kfree(net->dev_name_head);
10511 kfree(net->dev_index_head);
Vasily Averinee21b18b2017-11-12 22:28:46 +030010512 if (net != &init_net)
10513 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
Eric W. Biederman881d9662007-09-17 11:56:21 -070010514}
10515
Denis V. Lunev022cbae2007-11-13 03:23:50 -080010516static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -070010517 .init = netdev_init,
10518 .exit = netdev_exit,
10519};
10520
Pavel Emelyanov46650792007-10-08 20:38:39 -070010521static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +020010522{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +000010523 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +020010524 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +000010525 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +020010526 * initial network namespace
10527 */
10528 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +000010529 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +020010530 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -070010531 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +020010532
10533 /* Ignore unmoveable devices (i.e. loopback) */
10534 if (dev->features & NETIF_F_NETNS_LOCAL)
10535 continue;
10536
Eric W. Biedermane008b5f2009-11-29 22:25:30 +000010537 /* Leave virtual devices for the generic cleanup */
10538 if (dev->rtnl_link_ops)
10539 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -080010540
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010541 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -070010542 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
Jiri Pirko55b40db2019-07-28 14:56:36 +020010543 if (__dev_get_by_name(&init_net, fb_name))
10544 snprintf(fb_name, IFNAMSIZ, "dev%%d");
Pavel Emelyanovaca51392008-05-08 01:24:25 -070010545 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +020010546 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +000010547 pr_emerg("%s: failed to move %s to init_net: %d\n",
10548 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -070010549 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +020010550 }
10551 }
10552 rtnl_unlock();
10553}
10554
Eric W. Biederman50624c92013-09-23 21:19:49 -070010555static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
10556{
10557 /* Return with the rtnl_lock held when there are no network
10558 * devices unregistering in any network namespace in net_list.
10559 */
10560 struct net *net;
10561 bool unregistering;
Peter Zijlstraff960a72014-10-29 17:04:56 +010010562 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Eric W. Biederman50624c92013-09-23 21:19:49 -070010563
Peter Zijlstraff960a72014-10-29 17:04:56 +010010564 add_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -070010565 for (;;) {
Eric W. Biederman50624c92013-09-23 21:19:49 -070010566 unregistering = false;
10567 rtnl_lock();
10568 list_for_each_entry(net, net_list, exit_list) {
10569 if (net->dev_unreg_count > 0) {
10570 unregistering = true;
10571 break;
10572 }
10573 }
10574 if (!unregistering)
10575 break;
10576 __rtnl_unlock();
Peter Zijlstraff960a72014-10-29 17:04:56 +010010577
10578 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Eric W. Biederman50624c92013-09-23 21:19:49 -070010579 }
Peter Zijlstraff960a72014-10-29 17:04:56 +010010580 remove_wait_queue(&netdev_unregistering_wq, &wait);
Eric W. Biederman50624c92013-09-23 21:19:49 -070010581}
10582
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +000010583static void __net_exit default_device_exit_batch(struct list_head *net_list)
10584{
10585 /* At exit all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -040010586 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +000010587 * Do this across as many network namespaces as possible to
10588 * improve batching efficiency.
10589 */
10590 struct net_device *dev;
10591 struct net *net;
10592 LIST_HEAD(dev_kill_list);
10593
Eric W. Biederman50624c92013-09-23 21:19:49 -070010594 /* To prevent network device cleanup code from dereferencing
10595 * loopback devices or network devices that have been freed,
10596 * wait here for all pending unregistrations to complete
10597 * before unregistering the loopback device and allowing the
10598 * network namespace to be freed.
10599 *
10600 * The netdev todo list containing all network devices
10601 * unregistrations that happen in default_device_exit_batch
10602 * will run in the rtnl_unlock() at the end of
10603 * default_device_exit_batch.
10604 */
10605 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +000010606 list_for_each_entry(net, net_list, exit_list) {
10607 for_each_netdev_reverse(net, dev) {
Jiri Pirkob0ab2fa2014-06-26 09:58:25 +020010608 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +000010609 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
10610 else
10611 unregister_netdevice_queue(dev, &dev_kill_list);
10612 }
10613 }
10614 unregister_netdevice_many(&dev_kill_list);
10615 rtnl_unlock();
10616}
10617
Denis V. Lunev022cbae2007-11-13 03:23:50 -080010618static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +020010619 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +000010620 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +020010621};
10622
Linus Torvalds1da177e2005-04-16 15:20:36 -070010623/*
10624 * Initialize the DEV module. At boot time this walks the device list and
10625 * unhooks any devices that fail to initialise (normally hardware not
10626 * present) and leaves us with a valid list of present and active devices.
10627 *
10628 */
10629
10630/*
10631 * This is called single threaded during boot, so no need
10632 * to take the rtnl semaphore.
10633 */
10634static int __init net_dev_init(void)
10635{
10636 int i, rc = -ENOMEM;
10637
10638 BUG_ON(!dev_boot_phase);
10639
Linus Torvalds1da177e2005-04-16 15:20:36 -070010640 if (dev_proc_init())
10641 goto out;
10642
Eric W. Biederman8b41d182007-09-26 22:02:53 -070010643 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -070010644 goto out;
10645
10646 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +080010647 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010648 INIT_LIST_HEAD(&ptype_base[i]);
10649
Vlad Yasevich62532da2012-11-15 08:49:10 +000010650 INIT_LIST_HEAD(&offload_base);
10651
Eric W. Biederman881d9662007-09-17 11:56:21 -070010652 if (register_pernet_subsys(&netdev_net_ops))
10653 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010654
10655 /*
10656 * Initialise the packet receive queues.
10657 */
10658
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -070010659 for_each_possible_cpu(i) {
Eric Dumazet41852492016-08-26 12:50:39 -070010660 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
Eric Dumazete36fa2f2010-04-19 21:17:14 +000010661 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010662
Eric Dumazet41852492016-08-26 12:50:39 -070010663 INIT_WORK(flush, flush_backlog);
10664
Eric Dumazete36fa2f2010-04-19 21:17:14 +000010665 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -070010666 skb_queue_head_init(&sd->process_queue);
Steffen Klassertf53c7232017-12-20 10:41:36 +010010667#ifdef CONFIG_XFRM_OFFLOAD
10668 skb_queue_head_init(&sd->xfrm_backlog);
10669#endif
Eric Dumazete36fa2f2010-04-19 21:17:14 +000010670 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +000010671 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +000010672#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +000010673 sd->csd.func = rps_trigger_softirq;
10674 sd->csd.info = sd;
Eric Dumazete36fa2f2010-04-19 21:17:14 +000010675 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -070010676#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +000010677
David S. Miller7c4ec742018-07-20 23:37:55 -070010678 init_gro_hash(&sd->backlog);
Eric Dumazete36fa2f2010-04-19 21:17:14 +000010679 sd->backlog.poll = process_backlog;
10680 sd->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010681 }
10682
Linus Torvalds1da177e2005-04-16 15:20:36 -070010683 dev_boot_phase = 0;
10684
Eric W. Biederman505d4f72008-11-07 22:54:20 -080010685 /* The loopback device is special: if any other network device
10686 * is present in a network namespace, the loopback device must
10687 * be present. Since we now dynamically allocate and free the
10688 * loopback device, ensure this invariant is maintained by
10689 * keeping the loopback device as the first device on the
10690 * list of network devices. This ensures the loopback device
10691 * is the first device that appears and the last network device
10692 * that disappears.
10693 */
10694 if (register_pernet_device(&loopback_net_ops))
10695 goto out;
10696
10697 if (register_pernet_device(&default_device_ops))
10698 goto out;
10699
Carlos R. Mafra962cf362008-05-15 11:15:37 -030010700 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
10701 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010702
Sebastian Andrzej Siewiorf0bf90d2016-11-03 15:50:04 +010010703 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
10704 NULL, dev_cpu_dead);
10705 WARN_ON(rc < 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010706 rc = 0;
10707out:
10708 return rc;
10709}
10710
10711subsys_initcall(net_dev_init);