/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

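/*
 * Illustrative sketch (not part of the original file): the two pure-reader
 * patterns the comment above describes, assuming a made-up caller with a
 * struct net *net in scope.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();			// lockless pure reader
 *	for_each_netdev_rcu(net, dev)
 *		pr_info("%s\n", dev->name);
 *	rcu_read_unlock();
 *
 *	read_lock(&dev_base_lock);		// or: pure reader under dev_base_lock
 *	for_each_netdev(net, dev)
 *		pr_info("%s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 */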
seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

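/*
 * Illustrative sketch (not part of the original file): a minimal tap that
 * registers for all protocols via dev_add_pack(). The handler name and the
 * surrounding module plumbing are assumptions for the example.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);			// drop our reference to the skb
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type = htons(ETH_P_ALL),	// tap: lands on the ptype_all chain
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);		// later: dev_remove_pack(&example_pt)
 */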
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

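/*
 * Illustrative sketch (not part of the original file): how a protocol
 * might register GRO callbacks through dev_add_offload(). The callback
 * names are placeholders; real users (e.g. IPv4) wire up gro_receive,
 * gro_complete and gso_segment in struct offload_callbacks.
 *
 *	static struct packet_offload example_offload __read_mostly = {
 *		.type = htons(ETH_P_IP),
 *		.callbacks = {
 *			.gro_receive  = example_gro_receive,
 *			.gro_complete = example_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&example_offload);
 */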
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
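
/*
 * Illustrative sketch (not part of the original file): the "netdev="
 * kernel command line format this parser accepts, as documented in
 * kernel-parameters.txt: netdev=<irq>,<io>,<mem_start>,<mem_end>,<name>
 * For example, to preconfigure eth0 with IRQ 9 and I/O base 0x300:
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * get_options() consumes the leading integers into ints[] and the
 * remaining text becomes the device name stored in dev_boot_setup.
 */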

/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

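/*
 * Illustrative sketch (not part of the original file): a lockless lookup
 * with dev_get_by_name_rcu(). The pointer is only valid inside the RCU
 * read-side critical section; "lo" is an assumed interface name.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(&init_net, "lo");
 *	if (dev)
 *		pr_info("ifindex=%d\n", dev->ifindex);
 *	rcu_read_unlock();	// dev must not be used past this point
 */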
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

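/*
 * Illustrative sketch (not part of the original file): the refcounted
 * variant for sleepable contexts. The caller owns a reference until it
 * calls dev_put(); "eth0" is an assumed interface name.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		// ... use dev, may sleep ...
 *		dev_put(dev);
 *	}
 */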
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
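
/*
 * Illustrative sketch (not part of the original file): looking up an
 * Ethernet device by MAC address under RCU. The address bytes are made
 * up for the example.
 *
 *	static const char mac[ETH_ALEN] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		pr_info("found %s\n", dev->name);
 *	rcu_read_unlock();
 */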

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

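/*
 * Illustrative examples (not part of the original file) of what
 * dev_valid_name() accepts and rejects:
 *
 *	dev_valid_name("eth0")    -> true
 *	dev_valid_name("")        -> false  (empty)
 *	dev_valid_name(".")       -> false  (would collide in sysfs)
 *	dev_valid_name("my/dev")  -> false  (slash)
 *	dev_valid_name("eth 0")   -> false  (whitespace)
 */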
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

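/*
 * Illustrative sketch (not part of the original file): a driver asking
 * for the next free unit of a "%d" pattern while holding the rtnl lock.
 * The pattern "dummy%d" is an assumption for the example.
 *
 *	rtnl_lock();
 *	if (dev_alloc_name(dev, "dummy%d") >= 0)	// e.g. becomes "dummy0"
 *		pr_info("assigned %s\n", dev->name);
 *	rtnl_unlock();
 */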
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

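/*
 * Illustrative sketch (not part of the original file): renaming an
 * interface under the rtnl lock, with a wildcard pattern. The names
 * are assumptions for the example.
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "wan%d");	// or a literal name like "uplink0"
 *	rtnl_unlock();
 *	if (err < 0)
 *		pr_err("rename failed: %d\n", err);
 */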
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

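/*
 * Illustrative sketch (not part of the original file): setting and
 * clearing an alias under the rtnl lock; the alias text is made up.
 *
 *	rtnl_lock();
 *	dev_set_alias(dev, "uplink", strlen("uplink"));	// set
 *	dev_set_alias(dev, NULL, 0);			// clear
 *	rtnl_unlock();
 */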
1139
1140/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001141 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001142 * @dev: device to cause notification
1143 *
1144 * Called to indicate a device has changed features.
1145 */
1146void netdev_features_change(struct net_device *dev)
1147{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001148 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001149}
1150EXPORT_SYMBOL(netdev_features_change);
1151
1152/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 * netdev_state_change - device changes state
1154 * @dev: device to cause notification
1155 *
1156 * Called to indicate a device has changed state. This function calls
1157 * the notifier chains for netdev_chain and sends a NEWLINK message
1158 * to the routing socket.
1159 */
1160void netdev_state_change(struct net_device *dev)
1161{
1162 if (dev->flags & IFF_UP) {
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001163 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1165 }
1166}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001167EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168
Amerigo Wangee89bab2012-08-09 22:14:56 +00001169/**
1170 * netdev_notify_peers - notify network peers about existence of @dev
1171 * @dev: network device
1172 *
1173 * Generate traffic such that interested network peers are aware of
1174 * @dev, such as by generating a gratuitous ARP. This may be used when
1175 * a device wants to inform the rest of the network about some sort of
1176 * reconfiguration such as a failover event or virtual machine
1177 * migration.
1178 */
1179void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001180{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001181 rtnl_lock();
1182 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1183 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001184}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001185EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001186
Patrick McHardybd380812010-02-26 06:34:53 +00001187static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001189 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001190 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001192 ASSERT_RTNL();
1193
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 if (!netif_device_present(dev))
1195 return -ENODEV;
1196
Neil Hormanca99ca12013-02-05 08:05:43 +00001197 /* Block netpoll from trying to do any rx path servicing.
1198 * If we don't do this there is a chance ndo_poll_controller
1199 * or ndo_poll may be running while we open the device
1200 */
dingtianhongda6e3782013-05-27 19:53:31 +00001201 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001202
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001203 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1204 ret = notifier_to_errno(ret);
1205 if (ret)
1206 return ret;
1207
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001209
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001210 if (ops->ndo_validate_addr)
1211 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001212
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001213 if (!ret && ops->ndo_open)
1214 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215
Neil Hormanca99ca12013-02-05 08:05:43 +00001216 netpoll_rx_enable(dev);
1217
Jeff Garzikbada3392007-10-23 20:19:37 -07001218 if (ret)
1219 clear_bit(__LINK_STATE_START, &dev->state);
1220 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 dev->flags |= IFF_UP;
David S. Millerb4bd07c2009-02-06 22:06:43 -08001222 net_dmaengine_get();
Patrick McHardy4417da62007-06-27 01:28:10 -07001223 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001225 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001227
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228 return ret;
1229}
Patrick McHardybd380812010-02-26 06:34:53 +00001230
1231/**
1232 * dev_open - prepare an interface for use.
1233 * @dev: device to open
1234 *
1235 * Takes a device from down to up state. The device's private open
1236 * function is invoked and then the multicast lists are loaded. Finally
1237 * the device is moved into the up state and a %NETDEV_UP message is
1238 * sent to the netdev notifier chain.
1239 *
1240 * Calling this function on an active interface is a nop. On a failure
1241 * a negative errno code is returned.
1242 */
1243int dev_open(struct net_device *dev)
1244{
1245 int ret;
1246
Patrick McHardybd380812010-02-26 06:34:53 +00001247 if (dev->flags & IFF_UP)
1248 return 0;
1249
Patrick McHardybd380812010-02-26 06:34:53 +00001250 ret = __dev_open(dev);
1251 if (ret < 0)
1252 return ret;
1253
Patrick McHardybd380812010-02-26 06:34:53 +00001254 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1255 call_netdevice_notifiers(NETDEV_UP, dev);
1256
1257 return ret;
1258}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001259EXPORT_SYMBOL(dev_open);
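/* Example (illustrative sketch, not part of the original file):
 * bringing a device up from kernel code.  dev_open() asserts but does
 * not take the rtnl lock, so the caller supplies it; error handling
 * here is deliberately minimal.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 *	if (err < 0)
 *		pr_warn("could not open %s: %d\n", dev->name, err);
 */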
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
Octavian Purdila44345722010-12-13 12:44:07 +00001261static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262{
Octavian Purdila44345722010-12-13 12:44:07 +00001263 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001264
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001265 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001266 might_sleep();
1267
Octavian Purdila44345722010-12-13 12:44:07 +00001268 list_for_each_entry(dev, head, unreg_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001269 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270
Octavian Purdila44345722010-12-13 12:44:07 +00001271 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
Octavian Purdila44345722010-12-13 12:44:07 +00001273 /* Synchronize to scheduled poll. We cannot touch poll list, it
1274	 * can even be on a different cpu. So just clear netif_running().
1275	 *
1276	 * dev->stop() will invoke napi_disable() on all of its
1277 * napi_struct instances on this device.
1278 */
1279 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1280 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281
Octavian Purdila44345722010-12-13 12:44:07 +00001282 dev_deactivate_many(head);
1283
1284 list_for_each_entry(dev, head, unreg_list) {
1285 const struct net_device_ops *ops = dev->netdev_ops;
1286
1287 /*
1288		 * Call the device specific close. This cannot fail.
1289		 * It is only called if the device is UP.
1290 *
1291 * We allow it to be called even after a DETACH hot-plug
1292 * event.
1293 */
1294 if (ops->ndo_stop)
1295 ops->ndo_stop(dev);
1296
Octavian Purdila44345722010-12-13 12:44:07 +00001297 dev->flags &= ~IFF_UP;
Octavian Purdila44345722010-12-13 12:44:07 +00001298 net_dmaengine_put();
1299 }
1300
1301 return 0;
1302}
1303
1304static int __dev_close(struct net_device *dev)
1305{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001306 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001307 LIST_HEAD(single);
1308
Neil Hormanca99ca12013-02-05 08:05:43 +00001309 /* Temporarily disable netpoll until the interface is down */
dingtianhongda6e3782013-05-27 19:53:31 +00001310 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001311
Octavian Purdila44345722010-12-13 12:44:07 +00001312 list_add(&dev->unreg_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001313 retval = __dev_close_many(&single);
1314 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001315
1316 netpoll_rx_enable(dev);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001317 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001318}
1319
Eric Dumazet3fbd87582011-01-19 21:23:22 +00001320static int dev_close_many(struct list_head *head)
Octavian Purdila44345722010-12-13 12:44:07 +00001321{
1322 struct net_device *dev, *tmp;
1323 LIST_HEAD(tmp_list);
1324
1325 list_for_each_entry_safe(dev, tmp, head, unreg_list)
1326 if (!(dev->flags & IFF_UP))
1327 list_move(&dev->unreg_list, &tmp_list);
1328
1329 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001330
Octavian Purdila44345722010-12-13 12:44:07 +00001331 list_for_each_entry(dev, head, unreg_list) {
1332 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1333 call_netdevice_notifiers(NETDEV_DOWN, dev);
1334 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335
Octavian Purdila44345722010-12-13 12:44:07 +00001336 /* rollback_registered_many needs the complete original list */
1337 list_splice(&tmp_list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 return 0;
1339}
Patrick McHardybd380812010-02-26 06:34:53 +00001340
1341/**
1342 * dev_close - shutdown an interface.
1343 * @dev: device to shutdown
1344 *
1345 * This function moves an active device into down state. A
1346 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1347 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1348 * chain.
1349 */
1350int dev_close(struct net_device *dev)
1351{
Eric Dumazete14a5992011-05-10 12:26:06 -07001352 if (dev->flags & IFF_UP) {
1353 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001354
Neil Hormanca99ca12013-02-05 08:05:43 +00001355 /* Block netpoll rx while the interface is going down */
dingtianhongda6e3782013-05-27 19:53:31 +00001356 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001357
Eric Dumazete14a5992011-05-10 12:26:06 -07001358 list_add(&dev->unreg_list, &single);
1359 dev_close_many(&single);
1360 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001361
1362 netpoll_rx_enable(dev);
Eric Dumazete14a5992011-05-10 12:26:06 -07001363 }
dingtianhongda6e3782013-05-27 19:53:31 +00001364 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001365}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001366EXPORT_SYMBOL(dev_close);
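/* Example (illustrative sketch, not part of the original file):
 * dev_close() is a nop on a device that is already down, so a cleanup
 * path may call it unconditionally while holding rtnl;
 * "recovery_failed" is a hypothetical driver condition.
 *
 *	rtnl_lock();
 *	if (recovery_failed)
 *		dev_close(dev);
 *	rtnl_unlock();
 */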
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367
1368
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001369/**
1370 * dev_disable_lro - disable Large Receive Offload on a device
1371 * @dev: device
1372 *
1373 * Disable Large Receive Offload (LRO) on a net device. Must be
1374 * called under RTNL. This is needed if received packets may be
1375 * forwarded to another interface.
1376 */
1377void dev_disable_lro(struct net_device *dev)
1378{
Neil Hormanf11970e2011-05-24 08:31:09 +00001379 /*
1380 * If we're trying to disable lro on a vlan device
1381 * use the underlying physical device instead
1382 */
1383 if (is_vlan_dev(dev))
1384 dev = vlan_dev_real_dev(dev);
1385
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001386 dev->wanted_features &= ~NETIF_F_LRO;
1387 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001388
Michał Mirosław22d59692011-04-21 12:42:15 +00001389 if (unlikely(dev->features & NETIF_F_LRO))
1390 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001391}
1392EXPORT_SYMBOL(dev_disable_lro);
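/* Example (illustrative sketch, not part of the original file): code
 * that starts forwarding packets received on "dev" to another
 * interface disables LRO first, under rtnl as required above.
 *
 *	rtnl_lock();
 *	dev_disable_lro(dev);
 *	rtnl_unlock();
 */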
1393
1394
Eric W. Biederman881d9662007-09-17 11:56:21 -07001395static int dev_boot_phase = 1;
1396
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397/**
1398 * register_netdevice_notifier - register a network notifier block
1399 * @nb: notifier
1400 *
1401 * Register a notifier to be called when network device events occur.
1402 * The notifier passed is linked into the kernel structures and must
1403 * not be reused until it has been unregistered. A negative errno code
1404 * is returned on a failure.
1405 *
1406 * When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001407 * to the new notifier so that it has a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 * view of the network device list.
1409 */
1410
1411int register_netdevice_notifier(struct notifier_block *nb)
1412{
1413 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001414 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001415 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 int err;
1417
1418 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001419 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001420 if (err)
1421 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001422 if (dev_boot_phase)
1423 goto unlock;
1424 for_each_net(net) {
1425 for_each_netdev(net, dev) {
1426 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1427 err = notifier_to_errno(err);
1428 if (err)
1429 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430
Eric W. Biederman881d9662007-09-17 11:56:21 -07001431 if (!(dev->flags & IFF_UP))
1432 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001433
Eric W. Biederman881d9662007-09-17 11:56:21 -07001434 nb->notifier_call(nb, NETDEV_UP, dev);
1435 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001437
1438unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 rtnl_unlock();
1440 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001441
1442rollback:
1443 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001444 for_each_net(net) {
1445 for_each_netdev(net, dev) {
1446 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001447 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001448
Eric W. Biederman881d9662007-09-17 11:56:21 -07001449 if (dev->flags & IFF_UP) {
1450 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1451 nb->notifier_call(nb, NETDEV_DOWN, dev);
1452 }
1453 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001454 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001455 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001456
RongQing.Li8f891482011-11-30 23:43:07 -05001457outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001458 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001459 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001461EXPORT_SYMBOL(register_netdevice_notifier);
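/* Example (illustrative sketch, not part of the original file): a
 * minimal notifier that reacts to devices coming up.  In this kernel
 * the callback receives the net_device pointer directly as "ptr";
 * "my_netdev_event" and "my_nb" are hypothetical names.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 *	...
 *	unregister_netdevice_notifier(&my_nb);
 */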
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462
1463/**
1464 * unregister_netdevice_notifier - unregister a network notifier block
1465 * @nb: notifier
1466 *
1467 * Unregister a notifier previously registered by
1468 * register_netdevice_notifier(). The notifier is unlinked from the
1469 * kernel structures and may then be reused. A negative errno code
1470 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001471 *
1472 * After unregistering unregister and down device events are synthesized
1473 * for all devices on the device list to the removed notifier to remove
1474 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 */
1476
1477int unregister_netdevice_notifier(struct notifier_block *nb)
1478{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001479 struct net_device *dev;
1480 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001481 int err;
1482
1483 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001484 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001485 if (err)
1486 goto unlock;
1487
1488 for_each_net(net) {
1489 for_each_netdev(net, dev) {
1490 if (dev->flags & IFF_UP) {
1491 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1492 nb->notifier_call(nb, NETDEV_DOWN, dev);
1493 }
1494 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001495 }
1496 }
1497unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001498 rtnl_unlock();
1499 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001501EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502
1503/**
1504 * call_netdevice_notifiers - call all network notifier blocks
1505 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001506 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 *
1508 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001509 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 */
1511
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001512int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513{
Jiri Pirkoab930472010-04-20 01:45:37 -07001514 ASSERT_RTNL();
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001515 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001517EXPORT_SYMBOL(call_netdevice_notifiers);
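/* Example (illustrative sketch, not part of the original file): core
 * code signals events through this helper, e.g. after a hardware
 * address change:
 *
 *	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
 */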
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
Ingo Molnarc5905af2012-02-24 08:31:31 +01001519static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001520#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001521/* We are not allowed to call static_key_slow_dec() from irq context.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001522 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001523 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001524 */
1525static atomic_t netstamp_needed_deferred;
1526#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527
1528void net_enable_timestamp(void)
1529{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001530#ifdef HAVE_JUMP_LABEL
1531 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1532
1533 if (deferred) {
1534 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001535 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001536 return;
1537 }
1538#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001539 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001541EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
1543void net_disable_timestamp(void)
1544{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001545#ifdef HAVE_JUMP_LABEL
1546 if (in_interrupt()) {
1547 atomic_inc(&netstamp_needed_deferred);
1548 return;
1549 }
1550#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001551 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001553EXPORT_SYMBOL(net_disable_timestamp);
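/* Example (illustrative sketch, not part of the original file): a
 * timestamping user, such as a packet socket, typically pairs the two
 * calls over its lifetime; the deferred path above makes the disable
 * side safe from irq context.
 *
 *	net_enable_timestamp();
 *	...
 *	net_disable_timestamp();
 */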
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
Eric Dumazet3b098e22010-05-15 23:57:10 -07001555static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556{
Eric Dumazet588f0332011-11-15 04:12:55 +00001557 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001558 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001559 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560}
1561
Eric Dumazet588f0332011-11-15 04:12:55 +00001562#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001563 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001564 if ((COND) && !(SKB)->tstamp.tv64) \
1565 __net_timestamp(SKB); \
1566 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001567
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001568static inline bool is_skb_forwardable(struct net_device *dev,
1569 struct sk_buff *skb)
1570{
1571 unsigned int len;
1572
1573 if (!(dev->flags & IFF_UP))
1574 return false;
1575
1576 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1577 if (skb->len <= len)
1578 return true;
1579
1580 /* if TSO is enabled, we don't care about the length as the packet
1581	 * could be forwarded without having been segmented first
1582 */
1583 if (skb_is_gso(skb))
1584 return true;
1585
1586 return false;
1587}
1588
Arnd Bergmann44540962009-11-26 06:07:08 +00001589/**
1590 * dev_forward_skb - loopback an skb to another netif
1591 *
1592 * @dev: destination network device
1593 * @skb: buffer to forward
1594 *
1595 * return values:
1596 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001597 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001598 *
1599 * dev_forward_skb can be used for injecting an skb from the
1600 * start_xmit function of one device into the receive queue
1601 * of another device.
1602 *
1603 * The receiving device may be in another namespace, so
1604 * we have to clear all information in the skb that could
1605 * impact namespace isolation.
1606 */
1607int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1608{
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001609 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1610 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1611 atomic_long_inc(&dev->rx_dropped);
1612 kfree_skb(skb);
1613 return NET_RX_DROP;
1614 }
1615 }
1616
Arnd Bergmann44540962009-11-26 06:07:08 +00001617 skb_orphan(skb);
1618
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001619 if (unlikely(!is_skb_forwardable(dev, skb))) {
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001620 atomic_long_inc(&dev->rx_dropped);
Eric Dumazet6ec82562010-05-06 00:53:53 -07001621 kfree_skb(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001622 return NET_RX_DROP;
Eric Dumazet6ec82562010-05-06 00:53:53 -07001623 }
Benjamin LaHaise3b9785c2012-03-27 15:55:44 +00001624 skb->skb_iif = 0;
David S. Miller59b99972012-05-10 23:03:34 -04001625 skb_dst_drop(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001626 skb->tstamp.tv64 = 0;
1627 skb->pkt_type = PACKET_HOST;
1628 skb->protocol = eth_type_trans(skb, dev);
David S. Miller59b99972012-05-10 23:03:34 -04001629 skb->mark = 0;
1630 secpath_reset(skb);
1631 nf_reset(skb);
Patrick McHardy124dff02013-04-05 20:42:05 +02001632 nf_reset_trace(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001633 return netif_rx(skb);
1634}
1635EXPORT_SYMBOL_GPL(dev_forward_skb);
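/* Example (illustrative sketch, not part of the original file): a
 * veth-style pair device can hand frames straight to its peer from its
 * xmit routine; "my_get_peer" is a hypothetical helper.
 * dev_forward_skb() consumes the skb in all cases, so no extra freeing
 * is needed.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */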
1636
Changli Gao71d9dec2010-12-15 19:57:25 +00001637static inline int deliver_skb(struct sk_buff *skb,
1638 struct packet_type *pt_prev,
1639 struct net_device *orig_dev)
1640{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001641 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1642 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001643 atomic_inc(&skb->users);
1644 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1645}
1646
Eric Leblondc0de08d2012-08-16 22:02:58 +00001647static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1648{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001649 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001650 return false;
1651
1652 if (ptype->id_match)
1653 return ptype->id_match(ptype, skb->sk);
1654 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1655 return true;
1656
1657 return false;
1658}
1659
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660/*
1661 * Support routine. Sends outgoing frames to any network
1662 * taps currently in use.
1663 */
1664
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001665static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666{
1667 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001668 struct sk_buff *skb2 = NULL;
1669 struct packet_type *pt_prev = NULL;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001670
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 rcu_read_lock();
1672 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1673 /* Never send packets back to the socket
1674 * they originated from - MvS (miquels@drinkel.ow.org)
1675 */
1676 if ((ptype->dev == dev || !ptype->dev) &&
Eric Leblondc0de08d2012-08-16 22:02:58 +00001677 (!skb_loop_sk(ptype, skb))) {
Changli Gao71d9dec2010-12-15 19:57:25 +00001678 if (pt_prev) {
1679 deliver_skb(skb2, pt_prev, skb->dev);
1680 pt_prev = ptype;
1681 continue;
1682 }
1683
1684 skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 if (!skb2)
1686 break;
1687
Eric Dumazet70978182010-12-20 21:22:51 +00001688 net_timestamp_set(skb2);
1689
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 /* skb->nh should be correctly
1691 set by sender, so that the second statement is
1692 just protection against buggy protocols.
1693 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001694 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001696 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001697 skb2->network_header > skb2->tail) {
Joe Perchese87cc472012-05-13 21:56:26 +00001698 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1699 ntohs(skb2->protocol),
1700 dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001701 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 }
1703
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001704 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 skb2->pkt_type = PACKET_OUTGOING;
Changli Gao71d9dec2010-12-15 19:57:25 +00001706 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 }
1708 }
Changli Gao71d9dec2010-12-15 19:57:25 +00001709 if (pt_prev)
1710 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 rcu_read_unlock();
1712}
1713
Ben Hutchings2c530402012-07-10 10:55:09 +00001714/**
1715 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001716 * @dev: Network device
1717 * @txq: number of queues available
1718 *
1719 * If real_num_tx_queues is changed the tc mappings may no longer be
1720 * valid. To resolve this, verify the tc mapping remains valid and,
1721 * if not, NULL the mapping. With no priorities mapping to this
1722 * offset/count pair it will no longer be used. In the worst case, if
1723 * TC0 is invalid, nothing can be done, so priority mappings are
1724 * disabled. It is expected that drivers will fix this mapping if they
1725 * can before calling netif_set_real_num_tx_queues.
1726 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001727static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001728{
1729 int i;
1730 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1731
1732 /* If TC0 is invalidated disable TC mapping */
1733 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001734 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001735 dev->num_tc = 0;
1736 return;
1737 }
1738
1739 /* Invalidated prio to tc mappings set to TC0 */
1740 for (i = 1; i < TC_BITMASK + 1; i++) {
1741 int q = netdev_get_prio_tc_map(dev, i);
1742
1743 tc = &dev->tc_to_txq[q];
1744 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001745 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1746 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001747 netdev_set_prio_tc_map(dev, i, 0);
1748 }
1749 }
1750}
1751
Alexander Duyck537c00d2013-01-10 08:57:02 +00001752#ifdef CONFIG_XPS
1753static DEFINE_MUTEX(xps_map_mutex);
1754#define xmap_dereference(P) \
1755 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1756
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001757static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1758 int cpu, u16 index)
1759{
1760 struct xps_map *map = NULL;
1761 int pos;
1762
1763 if (dev_maps)
1764 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1765
1766 for (pos = 0; map && pos < map->len; pos++) {
1767 if (map->queues[pos] == index) {
1768 if (map->len > 1) {
1769 map->queues[pos] = map->queues[--map->len];
1770 } else {
1771 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1772 kfree_rcu(map, rcu);
1773 map = NULL;
1774 }
1775 break;
1776 }
1777 }
1778
1779 return map;
1780}
1781
Alexander Duyck024e9672013-01-10 08:57:46 +00001782static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001783{
1784 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00001785 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001786 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001787
1788 mutex_lock(&xps_map_mutex);
1789 dev_maps = xmap_dereference(dev->xps_maps);
1790
1791 if (!dev_maps)
1792 goto out_no_maps;
1793
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001794 for_each_possible_cpu(cpu) {
Alexander Duyck024e9672013-01-10 08:57:46 +00001795 for (i = index; i < dev->num_tx_queues; i++) {
1796 if (!remove_xps_queue(dev_maps, cpu, i))
1797 break;
1798 }
1799 if (i == dev->num_tx_queues)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001800 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001801 }
1802
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001803 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001804 RCU_INIT_POINTER(dev->xps_maps, NULL);
1805 kfree_rcu(dev_maps, rcu);
1806 }
1807
Alexander Duyck024e9672013-01-10 08:57:46 +00001808 for (i = index; i < dev->num_tx_queues; i++)
1809 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1810 NUMA_NO_NODE);
1811
Alexander Duyck537c00d2013-01-10 08:57:02 +00001812out_no_maps:
1813 mutex_unlock(&xps_map_mutex);
1814}
1815
Alexander Duyck01c5f862013-01-10 08:57:35 +00001816static struct xps_map *expand_xps_map(struct xps_map *map,
1817 int cpu, u16 index)
1818{
1819 struct xps_map *new_map;
1820 int alloc_len = XPS_MIN_MAP_ALLOC;
1821 int i, pos;
1822
1823 for (pos = 0; map && pos < map->len; pos++) {
1824 if (map->queues[pos] != index)
1825 continue;
1826 return map;
1827 }
1828
1829 /* Need to add queue to this CPU's existing map */
1830 if (map) {
1831 if (pos < map->alloc_len)
1832 return map;
1833
1834 alloc_len = map->alloc_len * 2;
1835 }
1836
1837 /* Need to allocate new map to store queue on this CPU's map */
1838 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1839 cpu_to_node(cpu));
1840 if (!new_map)
1841 return NULL;
1842
1843 for (i = 0; i < pos; i++)
1844 new_map->queues[i] = map->queues[i];
1845 new_map->alloc_len = alloc_len;
1846 new_map->len = pos;
1847
1848 return new_map;
1849}
1850
Alexander Duyck537c00d2013-01-10 08:57:02 +00001851int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
1852{
Alexander Duyck01c5f862013-01-10 08:57:35 +00001853 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001854 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001855 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001856 int cpu, numa_node_id = -2;
1857 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001858
1859 mutex_lock(&xps_map_mutex);
1860
1861 dev_maps = xmap_dereference(dev->xps_maps);
1862
Alexander Duyck01c5f862013-01-10 08:57:35 +00001863 /* allocate memory for queue storage */
1864 for_each_online_cpu(cpu) {
1865 if (!cpumask_test_cpu(cpu, mask))
1866 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001867
Alexander Duyck01c5f862013-01-10 08:57:35 +00001868 if (!new_dev_maps)
1869 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001870 if (!new_dev_maps) {
1871 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001872 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001873 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001874
1875 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1876 NULL;
1877
1878 map = expand_xps_map(map, cpu, index);
1879 if (!map)
1880 goto error;
1881
1882 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1883 }
1884
1885 if (!new_dev_maps)
1886 goto out_no_new_maps;
1887
1888 for_each_possible_cpu(cpu) {
1889 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1890 /* add queue to CPU maps */
1891 int pos = 0;
1892
1893 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1894 while ((pos < map->len) && (map->queues[pos] != index))
1895 pos++;
1896
1897 if (pos == map->len)
1898 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001899#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00001900 if (numa_node_id == -2)
1901 numa_node_id = cpu_to_node(cpu);
1902 else if (numa_node_id != cpu_to_node(cpu))
1903 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001904#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00001905 } else if (dev_maps) {
1906 /* fill in the new device map from the old device map */
1907 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1908 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00001909 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001910
Alexander Duyck537c00d2013-01-10 08:57:02 +00001911 }
1912
Alexander Duyck01c5f862013-01-10 08:57:35 +00001913 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1914
Alexander Duyck537c00d2013-01-10 08:57:02 +00001915 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00001916 if (dev_maps) {
1917 for_each_possible_cpu(cpu) {
1918 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1919 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1920 if (map && map != new_map)
1921 kfree_rcu(map, rcu);
1922 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001923
Alexander Duyck537c00d2013-01-10 08:57:02 +00001924 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001925 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001926
Alexander Duyck01c5f862013-01-10 08:57:35 +00001927 dev_maps = new_dev_maps;
1928 active = true;
1929
1930out_no_new_maps:
1931 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00001932 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
1933 (numa_node_id >= 0) ? numa_node_id :
1934 NUMA_NO_NODE);
1935
Alexander Duyck01c5f862013-01-10 08:57:35 +00001936 if (!dev_maps)
1937 goto out_no_maps;
1938
1939 /* removes queue from unused CPUs */
1940 for_each_possible_cpu(cpu) {
1941 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
1942 continue;
1943
1944 if (remove_xps_queue(dev_maps, cpu, index))
1945 active = true;
1946 }
1947
1948 /* free map if not active */
1949 if (!active) {
1950 RCU_INIT_POINTER(dev->xps_maps, NULL);
1951 kfree_rcu(dev_maps, rcu);
1952 }
1953
1954out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00001955 mutex_unlock(&xps_map_mutex);
1956
1957 return 0;
1958error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00001959 /* remove any maps that we added */
1960 for_each_possible_cpu(cpu) {
1961 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1962 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1963 NULL;
1964 if (new_map && new_map != map)
1965 kfree(new_map);
1966 }
1967
Alexander Duyck537c00d2013-01-10 08:57:02 +00001968 mutex_unlock(&xps_map_mutex);
1969
Alexander Duyck537c00d2013-01-10 08:57:02 +00001970 kfree(new_dev_maps);
1971 return -ENOMEM;
1972}
1973EXPORT_SYMBOL(netif_set_xps_queue);
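/* Example (illustrative sketch, not part of the original file): pin tx
 * queue 0 of a device to CPUs 0 and 1; error checking is abbreviated.
 *
 *	cpumask_var_t mask;
 *
 *	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(0, mask);
 *		cpumask_set_cpu(1, mask);
 *		netif_set_xps_queue(dev, mask, 0);
 *		free_cpumask_var(mask);
 *	}
 */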
1974
1975#endif
John Fastabendf0796d52010-07-01 13:21:57 +00001976/*
1977 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1978 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1979 */
Tom Herberte6484932010-10-18 18:04:39 +00001980int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00001981{
Tom Herbert1d24eb42010-11-21 13:17:27 +00001982 int rc;
1983
Tom Herberte6484932010-10-18 18:04:39 +00001984 if (txq < 1 || txq > dev->num_tx_queues)
1985 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00001986
Ben Hutchings5c565802011-02-15 19:39:21 +00001987 if (dev->reg_state == NETREG_REGISTERED ||
1988 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00001989 ASSERT_RTNL();
1990
Tom Herbert1d24eb42010-11-21 13:17:27 +00001991 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1992 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00001993 if (rc)
1994 return rc;
1995
John Fastabend4f57c082011-01-17 08:06:04 +00001996 if (dev->num_tc)
1997 netif_setup_tc(dev, txq);
1998
Alexander Duyck024e9672013-01-10 08:57:46 +00001999 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002000 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002001#ifdef CONFIG_XPS
2002 netif_reset_xps_queues_gt(dev, txq);
2003#endif
2004 }
John Fastabendf0796d52010-07-01 13:21:57 +00002005 }
Tom Herberte6484932010-10-18 18:04:39 +00002006
2007 dev->real_num_tx_queues = txq;
2008 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002009}
2010EXPORT_SYMBOL(netif_set_real_num_tx_queues);
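/* Example (illustrative sketch, not part of the original file): a
 * multiqueue driver that allocated the maximum number of queues at
 * alloc_etherdev_mq() time can shrink to what the hardware actually
 * enabled; "hw_queues" is hypothetical.  On a registered device this
 * must run under rtnl.  netif_set_real_num_rx_queues() below is used
 * the same way for the receive side.
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, hw_queues);
 *	rtnl_unlock();
 */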
Denis Vlasenko56079432006-03-29 15:57:29 -08002011
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002012#ifdef CONFIG_RPS
2013/**
2014 * netif_set_real_num_rx_queues - set actual number of RX queues used
2015 * @dev: Network device
2016 * @rxq: Actual number of RX queues
2017 *
2018 * This must be called either with the rtnl_lock held or before
2019 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002020 * negative error code. If called before registration, it always
2021 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002022 */
2023int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2024{
2025 int rc;
2026
Tom Herbertbd25fa72010-10-18 18:00:16 +00002027 if (rxq < 1 || rxq > dev->num_rx_queues)
2028 return -EINVAL;
2029
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002030 if (dev->reg_state == NETREG_REGISTERED) {
2031 ASSERT_RTNL();
2032
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002033 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2034 rxq);
2035 if (rc)
2036 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002037 }
2038
2039 dev->real_num_rx_queues = rxq;
2040 return 0;
2041}
2042EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2043#endif
2044
Ben Hutchings2c530402012-07-10 10:55:09 +00002045/**
2046 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002047 *
2048 * This routine should set an upper limit on the number of RSS queues
2049 * used by default by multiqueue devices.
2050 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002051int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002052{
2053 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2054}
2055EXPORT_SYMBOL(netif_get_num_default_rss_queues);
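/* Example (illustrative sketch, not part of the original file): a
 * driver probe can cap its RSS ring count with the helper above;
 * "MY_MAX_RINGS" is a hypothetical driver limit.
 *
 *	num_rings = min_t(int, MY_MAX_RINGS,
 *			  netif_get_num_default_rss_queues());
 */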
2056
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002057static inline void __netif_reschedule(struct Qdisc *q)
2058{
2059 struct softnet_data *sd;
2060 unsigned long flags;
2061
2062 local_irq_save(flags);
2063 sd = &__get_cpu_var(softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002064 q->next_sched = NULL;
2065 *sd->output_queue_tailp = q;
2066 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002067 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2068 local_irq_restore(flags);
2069}
2070
David S. Miller37437bb2008-07-16 02:15:04 -07002071void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002072{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002073 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2074 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002075}
2076EXPORT_SYMBOL(__netif_schedule);
2077
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002078void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002079{
David S. Miller3578b0c2010-08-03 00:24:04 -07002080 if (atomic_dec_and_test(&skb->users)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002081 struct softnet_data *sd;
2082 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08002083
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002084 local_irq_save(flags);
2085 sd = &__get_cpu_var(softnet_data);
2086 skb->next = sd->completion_queue;
2087 sd->completion_queue = skb;
2088 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2089 local_irq_restore(flags);
2090 }
Denis Vlasenko56079432006-03-29 15:57:29 -08002091}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002092EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08002093
2094void dev_kfree_skb_any(struct sk_buff *skb)
2095{
2096 if (in_irq() || irqs_disabled())
2097 dev_kfree_skb_irq(skb);
2098 else
2099 dev_kfree_skb(skb);
2100}
2101EXPORT_SYMBOL(dev_kfree_skb_any);
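/* Example (illustrative sketch, not part of the original file): a
 * tx-completion handler that may run in hardirq context frees skbs
 * with the _any variant rather than plain dev_kfree_skb();
 * "my_next_completed" is a hypothetical helper.
 *
 *	while ((skb = my_next_completed(priv)) != NULL)
 *		dev_kfree_skb_any(skb);
 */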
2102
2103
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002104/**
2105 * netif_device_detach - mark device as removed
2106 * @dev: network device
2107 *
2108 * Mark the device as removed from the system and therefore no longer available.
2109 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002110void netif_device_detach(struct net_device *dev)
2111{
2112 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2113 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002114 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002115 }
2116}
2117EXPORT_SYMBOL(netif_device_detach);
2118
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002119/**
2120 * netif_device_attach - mark device as attached
2121 * @dev: network device
2122 *
2123 * Mark the device as attached to the system and restart it if needed.
2124 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002125void netif_device_attach(struct net_device *dev)
2126{
2127 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2128 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002129 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002130 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002131 }
2132}
2133EXPORT_SYMBOL(netif_device_attach);
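/* Example (illustrative sketch, not part of the original file): the
 * classic detach/attach pairing in a PCI driver's power management
 * hooks; the function names are hypothetical.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */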
2134
Ben Hutchings36c92472012-01-17 07:57:56 +00002135static void skb_warn_bad_offload(const struct sk_buff *skb)
2136{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002137 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002138 struct net_device *dev = skb->dev;
2139 const char *driver = "";
2140
Ben Greearc846ad92013-04-19 10:45:52 +00002141 if (!net_ratelimit())
2142 return;
2143
Ben Hutchings36c92472012-01-17 07:57:56 +00002144 if (dev && dev->dev.parent)
2145 driver = dev_driver_string(dev->dev.parent);
2146
2147 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2148 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002149 driver, dev ? &dev->features : &null_features,
2150 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002151 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2152 skb_shinfo(skb)->gso_type, skb->ip_summed);
2153}
2154
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155/*
2156 * Invalidate hardware checksum when packet is to be mangled, and
2157 * complete checksum manually on outgoing path.
2158 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002159int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160{
Al Virod3bc23e2006-11-14 21:24:49 -08002161 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002162 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163
Patrick McHardy84fa7932006-08-29 16:44:56 -07002164 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002165 goto out_set_summed;
2166
2167 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002168 skb_warn_bad_offload(skb);
2169 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 }
2171
Eric Dumazetcef401d2013-01-25 20:34:37 +00002172 /* Before computing a checksum, we should make sure no frag could
2173	 * be modified by an external entity: the checksum could be wrong.
2174 */
2175 if (skb_has_shared_frag(skb)) {
2176 ret = __skb_linearize(skb);
2177 if (ret)
2178 goto out;
2179 }
2180
Michał Mirosław55508d62010-12-14 15:24:08 +00002181 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002182 BUG_ON(offset >= skb_headlen(skb));
2183 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2184
2185 offset += skb->csum_offset;
2186 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2187
2188 if (skb_cloned(skb) &&
2189 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2191 if (ret)
2192 goto out;
2193 }
2194
Herbert Xua0308472007-10-15 01:47:15 -07002195 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002196out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002198out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 return ret;
2200}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002201EXPORT_SYMBOL(skb_checksum_help);
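/* Example (illustrative sketch, not part of the original file): a
 * driver whose hardware cannot checksum a particular frame falls back
 * to software before queueing it; "my_hw_can_csum" is hypothetical.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !my_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */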
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002203__be16 skb_network_protocol(struct sk_buff *skb)
2204{
2205 __be16 type = skb->protocol;
David S. Miller61816592013-03-20 12:46:26 -04002206 int vlan_depth = ETH_HLEN;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002207
Pravin B Shelar19acc322013-05-07 20:41:07 +00002208 /* Tunnel gso handlers can set protocol to ethernet. */
2209 if (type == htons(ETH_P_TEB)) {
2210 struct ethhdr *eth;
2211
2212 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2213 return 0;
2214
2215 eth = (struct ethhdr *)skb_mac_header(skb);
2216 type = eth->h_proto;
2217 }
2218
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002219 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002220 struct vlan_hdr *vh;
2221
2222 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2223 return 0;
2224
2225 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2226 type = vh->h_vlan_encapsulated_proto;
2227 vlan_depth += VLAN_HLEN;
2228 }
2229
2230 return type;
2231}
2232
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002233/**
2234 * skb_mac_gso_segment - mac layer segmentation handler.
2235 * @skb: buffer to segment
2236 * @features: features for the output path (see dev->features)
2237 */
2238struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2239 netdev_features_t features)
2240{
2241 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2242 struct packet_offload *ptype;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002243 __be16 type = skb_network_protocol(skb);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002244
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002245 if (unlikely(!type))
2246 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002247
2248 __skb_pull(skb, skb->mac_len);
2249
2250 rcu_read_lock();
2251 list_for_each_entry_rcu(ptype, &offload_base, list) {
2252 if (ptype->type == type && ptype->callbacks.gso_segment) {
2253 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2254 int err;
2255
2256 err = ptype->callbacks.gso_send_check(skb);
2257 segs = ERR_PTR(err);
2258 if (err || skb_gso_ok(skb, features))
2259 break;
2260 __skb_push(skb, (skb->data -
2261 skb_network_header(skb)));
2262 }
2263 segs = ptype->callbacks.gso_segment(skb, features);
2264 break;
2265 }
2266 }
2267 rcu_read_unlock();
2268
2269 __skb_push(skb, skb->data - skb_mac_header(skb));
2270
2271 return segs;
2272}
2273EXPORT_SYMBOL(skb_mac_gso_segment);
2274
2275
Cong Wang12b00042013-02-05 16:36:38 +00002276/* openvswitch calls this on the rx path, so we need a different check.
2277 */
2278static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2279{
2280 if (tx_path)
2281 return skb->ip_summed != CHECKSUM_PARTIAL;
2282 else
2283 return skb->ip_summed == CHECKSUM_NONE;
2284}
2285
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002286/**
Cong Wang12b00042013-02-05 16:36:38 +00002287 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002288 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002289 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002290 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002291 *
2292 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002293 *
2294 * It may return NULL if the skb requires no segmentation. This is
2295 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002296 */
Cong Wang12b00042013-02-05 16:36:38 +00002297struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2298 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002299{
Cong Wang12b00042013-02-05 16:36:38 +00002300 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002301 int err;
2302
Ben Hutchings36c92472012-01-17 07:57:56 +00002303 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002304
Herbert Xua430a432006-07-08 13:34:56 -07002305 if (skb_header_cloned(skb) &&
2306 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2307 return ERR_PTR(err);
2308 }
2309
Pravin B Shelar68c33162013-02-14 14:02:41 +00002310 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002311 skb_reset_mac_header(skb);
2312 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002313
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002314 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002315}
Cong Wang12b00042013-02-05 16:36:38 +00002316EXPORT_SYMBOL(__skb_gso_segment);
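/* Example (illustrative sketch, not part of the original file):
 * software GSO as the stack performs it: segment, then send each
 * resulting skb.  A NULL return (no segmentation needed, skb sent
 * as-is) and error unwinding are not shown; "my_xmit_one" is
 * hypothetical.
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *	if (IS_ERR(segs))
 *		goto drop;
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		my_xmit_one(segs);
 *		segs = next;
 *	}
 */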
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002317
Herbert Xufb286bb2005-11-10 13:01:24 -08002318/* Take action when hardware reception checksum errors are detected. */
2319#ifdef CONFIG_BUG
2320void netdev_rx_csum_fault(struct net_device *dev)
2321{
2322 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002323 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002324 dump_stack();
2325 }
2326}
2327EXPORT_SYMBOL(netdev_rx_csum_fault);
2328#endif
2329
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330/* Actually, we should eliminate this check as soon as we know that:
2331 * 1. IOMMU is present and can map all the memory.
2332 * 2. No high memory really exists on this machine.
2333 */
2334
Eric Dumazet9092c652010-04-02 13:34:49 -07002335static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002337#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002339 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002340 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2341 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2342 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002343 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002344 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002345 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002347 if (PCI_DMA_BUS_IS_PHYS) {
2348 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349
Eric Dumazet9092c652010-04-02 13:34:49 -07002350 if (!pdev)
2351 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002352 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002353 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2354 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002355 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2356 return 1;
2357 }
2358 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002359#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 return 0;
2361}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002363struct dev_gso_cb {
2364 void (*destructor)(struct sk_buff *skb);
2365};
2366
2367#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2368
2369static void dev_gso_skb_destructor(struct sk_buff *skb)
2370{
2371 struct dev_gso_cb *cb;
2372
2373 do {
2374 struct sk_buff *nskb = skb->next;
2375
2376 skb->next = nskb->next;
2377 nskb->next = NULL;
2378 kfree_skb(nskb);
2379 } while (skb->next);
2380
2381 cb = DEV_GSO_CB(skb);
2382 if (cb->destructor)
2383 cb->destructor(skb);
2384}
2385
2386/**
2387 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2388 * @skb: buffer to segment
Jesse Gross91ecb632011-01-09 06:23:33 +00002389 * @features: device features as applicable to this skb
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002390 *
2391 * This function segments the given skb and stores the list of segments
2392 * in skb->next.
2393 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002394static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002395{
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002396 struct sk_buff *segs;
2397
Herbert Xu576a30e2006-06-27 13:22:38 -07002398 segs = skb_gso_segment(skb, features);
2399
2400 /* Verifying header integrity only. */
2401 if (!segs)
2402 return 0;
2403
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07002404 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002405 return PTR_ERR(segs);
2406
2407 skb->next = segs;
2408 DEV_GSO_CB(skb)->destructor = skb->destructor;
2409 skb->destructor = dev_gso_skb_destructor;
2410
2411 return 0;
2412}
2413
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002414static netdev_features_t harmonize_features(struct sk_buff *skb,
2415 __be16 protocol, netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002416{
Ed Cashinc0d680e2012-09-19 15:49:00 +00002417 if (skb->ip_summed != CHECKSUM_NONE &&
2418 !can_checksum_protocol(features, protocol)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002419 features &= ~NETIF_F_ALL_CSUM;
Jesse Grossf01a5232011-01-09 06:23:31 +00002420 } else if (illegal_highdma(skb->dev, skb)) {
2421 features &= ~NETIF_F_SG;
2422 }
2423
2424 return features;
2425}
2426
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002427netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002428{
2429 __be16 protocol = skb->protocol;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002430 netdev_features_t features = skb->dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002431
Ben Hutchings30b678d2012-07-30 15:57:00 +00002432 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2433 features &= ~NETIF_F_GSO_MASK;
2434
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002435 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
Jesse Gross58e998c2010-10-29 12:14:55 +00002436 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2437 protocol = veh->h_vlan_encapsulated_proto;
Jesse Grossf01a5232011-01-09 06:23:31 +00002438 } else if (!vlan_tx_tag_present(skb)) {
2439 return harmonize_features(skb, protocol, features);
2440 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002441
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002442 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2443 NETIF_F_HW_VLAN_STAG_TX);
Jesse Grossf01a5232011-01-09 06:23:31 +00002444
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002445 if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002446 return harmonize_features(skb, protocol, features);
2447 } else {
2448 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002449 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2450 NETIF_F_HW_VLAN_STAG_TX;
Jesse Grossf01a5232011-01-09 06:23:31 +00002451 return harmonize_features(skb, protocol, features);
2452 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002453}
Jesse Grossf01a5232011-01-09 06:23:31 +00002454EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002455
John Fastabend6afff0c2010-06-16 14:18:12 +00002456/*
2457 * Returns true if either:
2458 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
Rami Rosend1a53df2012-08-27 23:39:24 +00002459 * 2. skb is fragmented and the device does not support SG.
John Fastabend6afff0c2010-06-16 14:18:12 +00002460 */
2461static inline int skb_needs_linearize(struct sk_buff *skb,
Patrick McHardy6708c9e2013-05-01 22:36:49 +00002462 netdev_features_t features)
John Fastabend6afff0c2010-06-16 14:18:12 +00002463{
Jesse Gross02932ce2011-01-09 06:23:34 +00002464 return skb_is_nonlinear(skb) &&
2465 ((skb_has_frag_list(skb) &&
2466 !(features & NETIF_F_FRAGLIST)) ||
Jesse Grosse1e78db2010-10-29 12:14:53 +00002467 (skb_shinfo(skb)->nr_frags &&
Jesse Gross02932ce2011-01-09 06:23:34 +00002468 !(features & NETIF_F_SG)));
John Fastabend6afff0c2010-06-16 14:18:12 +00002469}
2470
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002471int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2472 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002473{
Stephen Hemminger00829822008-11-20 20:14:53 -08002474 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002475 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002476 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002477
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002478 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002479 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002480
Eric Dumazet93f154b2009-05-18 22:19:19 -07002481 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002482 * If device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002483		 * it's hot in this cpu cache
2484 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002485 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2486 skb_dst_drop(skb);
2487
Jesse Grossfc741212011-01-09 06:23:32 +00002488 features = netif_skb_features(skb);
2489
Jesse Gross7b9c6092010-10-20 13:56:04 +00002490 if (vlan_tx_tag_present(skb) &&
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002491 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2492 skb = __vlan_put_tag(skb, skb->vlan_proto,
2493 vlan_tx_tag_get(skb));
Jesse Gross7b9c6092010-10-20 13:56:04 +00002494 if (unlikely(!skb))
2495 goto out;
2496
2497 skb->vlan_tci = 0;
2498 }
2499
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002500 /* If encapsulation offload request, verify we are testing
2501 * hardware encapsulation features instead of standard
2502 * features for the netdev
2503 */
2504 if (skb->encapsulation)
2505 features &= dev->hw_enc_features;
2506
Jesse Grossfc741212011-01-09 06:23:32 +00002507 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002508 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002509 goto out_kfree_skb;
2510 if (skb->next)
2511 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002512 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002513 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002514 __skb_linearize(skb))
2515 goto out_kfree_skb;
2516
2517 /* If packet is not checksummed and device does not
2518 * support checksumming for this protocol, complete
2519 * checksumming here.
2520 */
2521 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002522 if (skb->encapsulation)
2523 skb_set_inner_transport_header(skb,
2524 skb_checksum_start_offset(skb));
2525 else
2526 skb_set_transport_header(skb,
2527 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002528 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002529 skb_checksum_help(skb))
2530 goto out_kfree_skb;
2531 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002532 }
2533
Eric Dumazetb40863c2012-09-18 20:44:49 +00002534 if (!list_empty(&ptype_all))
2535 dev_queue_xmit_nit(skb, dev);
2536
Koki Sanagiec764bf2011-05-30 21:48:34 +00002537 skb_len = skb->len;
Patrick Ohlyac45f602009-02-12 05:03:37 +00002538 rc = ops->ndo_start_xmit(skb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002539 trace_net_dev_xmit(skb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002540 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07002541 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002542 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002543 }
2544
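	/* GSO path: dev_gso_segment() has attached the list of segments to
	 * skb->next.  Walk the list and hand each segment to the driver; on
	 * error, re-link the unsent remainder so the caller can requeue it.
	 */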
gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(nskb, dev);

		skb_len = nskb->len;
		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc, dev, skb_len);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_xmit_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL)) {
		skb->destructor = DEV_GSO_CB(skb)->destructor;
		consume_skb(skb);
		return rc;
	}
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get a more precise estimate of the bytes sent on the wire,
	 * add the header size of every segment to pkt_len.
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
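
/* Worked example for qdisc_pkt_len_init() above (illustrative numbers, not
 * from this file): a TSO skb with skb->len = 65226, a 66-byte mac+ip+tcp
 * header and gso_size = 1448 carries gso_segs = 45, so pkt_len is credited
 * with the 44 extra header copies: 65226 + 44 * 66 = 68130 bytes on the wire.
 */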

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get the qdisc main lock.
	 * This permits the __QDISC_STATE_RUNNING owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out, and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10
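
/* xmit_recursion counts how deep dev_queue_xmit() has re-entered itself on
 * this CPU through stacked virtual devices (e.g. a tunnel on a vlan on a
 * bond), so that a misconfigured device loop cannot overflow the kernel
 * stack; see the recursion_alert label in dev_queue_xmit() below.
 */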

/**
 *	dev_loopback_xmit - loop back @skb
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
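
/* Illustrative caller (a sketch, not from this file): multicast output paths
 * typically clone an outgoing skb and feed the clone back through
 * dev_loopback_xmit() so that local listeners also see the frame:
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		dev_loopback_xmit(nskb);
 */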

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *	I notice this method can also return errors from the queue disciplines,
 *	including NET_XMIT_DROP, which is a positive value. So, errors can also
 *	be positive.
 *
 *	Regardless of the return value, the skb is consumed, so it is currently
 *	difficult to retry a send to this method. (You can bump the ref count
 *	before sending to hold a reference for retry if you are careful.)
 *
 *	When calling this method, interrupts MUST be enabled. This is because
 *	the BH enable code must have IRQs enabled so that it will not deadlock.
 *	--BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	txq = netdev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. This is the common case for software
	   devices: loopback and all sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here. (e.g. loopback and IP tunnels are clean, ignoring statistics
	   counters.)
	   However, it is possible that they rely on the protection
	   we provide here.

	   Check this and take the lock. It is not prone to deadlocks.
	   Alternatively, shoot the noqueue qdisc; that is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately.
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
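
/* Illustrative use (a sketch, not from this file): a protocol's output path
 * sets skb->dev and skb->priority, builds the headers, and then hands the
 * buffer off, never retrying with the same skb since it is consumed either
 * way:
 *
 *	skb->dev = dev;
 *	rc = dev_queue_xmit(skb);
 *	(an error here is typically just counted; the skb is already gone)
 */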


/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;	/* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb->rxhash & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
						 sock_flow_table->mask];

		/*
		 * If the desired CPU (where the last recvmsg was done) is
		 * different from the current CPU (the one in the rx-queue
		 * flow table entry), switch if one of the following holds:
		 *   - The current CPU is unset (equal to RPS_NO_CPU).
		 *   - The current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in-order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);
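
/* Illustrative driver usage (a sketch; the helper names are hypothetical):
 * an RFS-capable driver typically walks its filter table from a periodic
 * work item and tears down filters that the stack no longer needs:
 *
 *	if (rps_may_expire_flow(netdev, rxq_index, flow_id, filter_id))
 *		my_remove_hw_filter(priv, filter_id);
 */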

#endif /* CONFIG_RFS_ACCEL */

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check whether this softnet_data structure belongs to another CPU.
 * If it does, queue it to our IPI list and return 1;
 * otherwise return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = &__get_cpu_var(softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
/*
 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
 * queue (which may be a remote CPU's queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for the backlog device.
		 * We can use a non-atomic operation since we own the queue
		 * lock.
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 *	netif_rx - post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process. It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
EXPORT_SYMBOL(netif_rx);
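
/* Illustrative driver usage (a sketch, not from this file): a non-NAPI
 * driver's RX interrupt handler typically builds an skb around the received
 * frame and hands it to the stack:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */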

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
 * instructions (a compare and two extra stores) when it is off but
 * CONFIG_NET_CLS_ACT is on.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
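
/* Illustrative use (a sketch, not from this file): stacked drivers such as
 * bridge or macvlan attach to their lower device under RTNL with something
 * like
 *
 *	err = netdev_rx_handler_register(lower_dev, my_handle_frame, my_priv);
 *
 * where my_handle_frame and my_priv are hypothetical names for the caller's
 * rx_handler_func_t and private state.
 */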

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
	 * section is guaranteed to see a non-NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_ARP):
	case __constant_htons(ETH_P_IP):
	case __constant_htons(ETH_P_IPV6):
	case __constant_htons(ETH_P_8021Q):
	case __constant_htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}
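
/* Rationale (see also __netif_receive_skb() below): skbs allocated from the
 * PFMEMALLOC reserves exist to let memory reclaim over the network make
 * progress, so only the protocols above, which handle such skbs specially,
 * are allowed to receive them.
 */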

static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		goto out;

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (vlan_tx_nonzero_tag_present(skb))
		skb->pkt_type = PACKET_OTHERHOST;

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
out:
	return ret;
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the
		 * allocation context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
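
/* Illustrative NAPI usage (a sketch, not from this file): a NAPI driver's
 * poll routine typically pushes each completed frame up the stack with
 * netif_receive_skb() (or napi_gro_receive() to opt in to GRO):
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_receive_skb(skb);
 */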

/* Network device is going away, flush any packets still pending.
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

/* napi->gro_list contains packets ordered by age; the youngest packets
 * are at its head.  Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

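/* Mark which held GRO skbs could belong to the same flow as @skb by comparing
 * the device, the vlan tag and the MAC header; the resulting ->same_flow is
 * only a hint, which the protocol's gro_receive callback confirms or rejects.
 */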
static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_gro_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_gro_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);

		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
			skb_frag_unref(skb, 0);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003827
Herbert Xu96e93ea2009-01-06 10:49:34 -08003828
Rami Rosenbb728822012-11-28 21:55:25 +00003829static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003830{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003831 switch (ret) {
3832 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003833 if (netif_receive_skb(skb))
3834 ret = GRO_DROP;
3835 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003836
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003837 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003838 kfree_skb(skb);
3839 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003840
Eric Dumazetdaa86542012-04-19 07:07:40 +00003841 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003842 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3843 kmem_cache_free(skbuff_head_cache, skb);
3844 else
3845 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003846 break;
3847
Ben Hutchings5b252f02009-10-29 07:17:09 +00003848 case GRO_HELD:
3849 case GRO_MERGED:
3850 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003851 }
3852
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003853 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003854}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003855
Eric Dumazetca07e432012-10-06 22:28:06 +00003856static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00003857{
Eric Dumazetca07e432012-10-06 22:28:06 +00003858 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3859 const skb_frag_t *frag0 = &pinfo->frags[0];
3860
Herbert Xu78a478d2009-05-26 18:50:21 +00003861 NAPI_GRO_CB(skb)->data_offset = 0;
3862 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00003863 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00003864
Herbert Xu78d3fd02009-05-26 18:50:23 +00003865 if (skb->mac_header == skb->tail &&
Eric Dumazetca07e432012-10-06 22:28:06 +00003866 pinfo->nr_frags &&
3867 !PageHighMem(skb_frag_page(frag0))) {
3868 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3869 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00003870 }
Herbert Xu78a478d2009-05-26 18:50:21 +00003871}
Herbert Xu78a478d2009-05-26 18:50:21 +00003872
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003873gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003874{
Herbert Xu86911732009-01-29 14:19:50 +00003875 skb_gro_reset_offset(skb);
3876
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003877 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003878}
3879EXPORT_SYMBOL(napi_gro_receive);
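
/*
 * Usage example (a minimal sketch, not part of this file): a typical
 * driver hands each received skb to GRO from its NAPI poll callback.
 * The names my_adapter and my_ring_next_skb are hypothetical.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_adapter *ap =
 *			container_of(napi, struct my_adapter, napi);
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = my_ring_next_skb(ap))) {
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */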
3880
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00003881static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003882{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003883 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00003884 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3885 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00003886 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08003887 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08003888 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003889
3890 napi->skb = skb;
3891}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003892
Herbert Xu76620aa2009-04-16 02:02:07 -07003893struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003894{
Herbert Xu5d38a072009-01-04 16:13:40 -08003895 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003896
3897 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00003898 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3899 if (skb)
3900 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003901 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08003902 return skb;
3903}
Herbert Xu76620aa2009-04-16 02:02:07 -07003904EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003905
Rami Rosenbb728822012-11-28 21:55:25 +00003906static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003907 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003908{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003909 switch (ret) {
3910 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00003911 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00003912 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00003913
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003914 if (ret == GRO_HELD)
3915 skb_gro_pull(skb, -ETH_HLEN);
3916 else if (netif_receive_skb(skb))
3917 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00003918 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003919
3920 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003921 case GRO_MERGED_FREE:
3922 napi_reuse_skb(napi, skb);
3923 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003924
3925 case GRO_MERGED:
3926 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003927 }
3928
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003929 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003930}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003931
Eric Dumazet4adb9c42012-05-18 20:49:06 +00003932static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003933{
Herbert Xu76620aa2009-04-16 02:02:07 -07003934 struct sk_buff *skb = napi->skb;
3935 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00003936 unsigned int hlen;
3937 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07003938
3939 napi->skb = NULL;
3940
3941 skb_reset_mac_header(skb);
3942 skb_gro_reset_offset(skb);
3943
Herbert Xua5b1cf22009-05-26 18:50:28 +00003944 off = skb_gro_offset(skb);
3945 hlen = off + sizeof(*eth);
3946 eth = skb_gro_header_fast(skb, off);
3947 if (skb_gro_header_hard(skb, hlen)) {
3948 eth = skb_gro_header_slow(skb, hlen, off);
3949 if (unlikely(!eth)) {
3950 napi_reuse_skb(napi, skb);
3951 skb = NULL;
3952 goto out;
3953 }
Herbert Xu76620aa2009-04-16 02:02:07 -07003954 }
3955
3956 skb_gro_pull(skb, sizeof(*eth));
3957
3958 /*
3959 * This works because the only protocols we care about don't require
3960 * special handling. We'll fix it up properly at the end.
3961 */
3962 skb->protocol = eth->h_proto;
3963
3964out:
3965 return skb;
3966}
Herbert Xu76620aa2009-04-16 02:02:07 -07003967
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003968gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07003969{
3970 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003971
3972 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003973 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003974
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003975 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08003976}
3977EXPORT_SYMBOL(napi_gro_frags);
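
/*
 * Usage example (a sketch under assumptions, not part of this file):
 * page-based drivers obtain a reusable skb from napi_get_frags(),
 * attach the received page and account its length, then feed it to
 * napi_gro_frags(). page, offset and len are assumed to come from the
 * driver's RX descriptor handling.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 *	napi_gro_frags(napi);
 */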
3978
Eric Dumazete326bed2010-04-22 00:22:45 -07003979/*
 3980 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
3981 * Note: called with local irq disabled, but exits with local irq enabled.
3982 */
3983static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3984{
3985#ifdef CONFIG_RPS
3986 struct softnet_data *remsd = sd->rps_ipi_list;
3987
3988 if (remsd) {
3989 sd->rps_ipi_list = NULL;
3990
3991 local_irq_enable();
3992
3993 /* Send pending IPI's to kick RPS processing on remote cpus. */
3994 while (remsd) {
3995 struct softnet_data *next = remsd->rps_ipi_next;
3996
3997 if (cpu_online(remsd->cpu))
3998 __smp_call_function_single(remsd->cpu,
3999 &remsd->csd, 0);
4000 remsd = next;
4001 }
4002 } else
4003#endif
4004 local_irq_enable();
4005}
4006
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004007static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008{
4009 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004010 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004011
Eric Dumazete326bed2010-04-22 00:22:45 -07004012#ifdef CONFIG_RPS
 4013	/* Check if we have pending IPIs; it's better to send them now
 4014	 * than to wait for net_rx_action() to end.
 4015	 */
4016 if (sd->rps_ipi_list) {
4017 local_irq_disable();
4018 net_rps_action_and_irq_enable(sd);
4019 }
4020#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004021 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004022 local_irq_disable();
4023 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004025 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026
Changli Gao6e7676c2010-04-27 15:07:33 -07004027 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004028 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004029 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004030 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004031 input_queue_head_incr(sd);
4032 if (++work >= quota) {
4033 local_irq_enable();
4034 return work;
4035 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004036 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037
Changli Gao6e7676c2010-04-27 15:07:33 -07004038 rps_lock(sd);
4039 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004040 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004041 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4042 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004043
Changli Gao6e7676c2010-04-27 15:07:33 -07004044 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004045 /*
4046 * Inline a custom version of __napi_complete().
 4047			 * Only the current cpu owns and manipulates this napi,
 4048			 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
 4049			 * so we can use a plain write instead of clear_bit(),
 4050			 * and we don't need an smp_mb() memory barrier.
4051 */
4052 list_del(&napi->poll_list);
4053 napi->state = 0;
4054
Changli Gao6e7676c2010-04-27 15:07:33 -07004055 quota = work + qlen;
4056 }
4057 rps_unlock(sd);
4058 }
4059 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004061 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004062}
4063
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004064/**
4065 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004066 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004067 *
4068 * The entry's receive function will be scheduled to run
4069 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004070void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004071{
4072 unsigned long flags;
4073
4074 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004075 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004076 local_irq_restore(flags);
4077}
4078EXPORT_SYMBOL(__napi_schedule);
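
/*
 * Usage example (a minimal sketch; my_adapter and my_disable_rx_irq are
 * hypothetical): an interrupt handler masks the device's RX interrupt
 * and schedules NAPI. The napi_schedule_prep()/__napi_schedule() pair
 * is the standard idiom when the driver needs to act between the two.
 *
 *	static irqreturn_t my_intr(int irq, void *data)
 *	{
 *		struct my_adapter *ap = data;
 *
 *		if (napi_schedule_prep(&ap->napi)) {
 *			my_disable_rx_irq(ap);
 *			__napi_schedule(&ap->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */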
4079
Herbert Xud565b0a2008-12-15 23:38:52 -08004080void __napi_complete(struct napi_struct *n)
4081{
4082 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4083 BUG_ON(n->gro_list);
4084
4085 list_del(&n->poll_list);
4086 smp_mb__before_clear_bit();
4087 clear_bit(NAPI_STATE_SCHED, &n->state);
4088}
4089EXPORT_SYMBOL(__napi_complete);
4090
4091void napi_complete(struct napi_struct *n)
4092{
4093 unsigned long flags;
4094
4095 /*
 4096	 * Don't let napi dequeue from the cpu poll list
 4097	 * just in case it's running on a different cpu.
4098 */
4099 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4100 return;
4101
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004102 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004103 local_irq_save(flags);
4104 __napi_complete(n);
4105 local_irq_restore(flags);
4106}
4107EXPORT_SYMBOL(napi_complete);
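
/*
 * Usage example (sketch; my_enable_rx_irq is hypothetical): the
 * counterpart to scheduling. When a poll round consumes less than its
 * budget, the driver completes NAPI and re-enables its RX interrupt.
 *
 *	if (work_done < budget) {
 *		napi_complete(napi);
 *		my_enable_rx_irq(ap);
 *	}
 *	return work_done;
 */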
4108
4109void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4110 int (*poll)(struct napi_struct *, int), int weight)
4111{
4112 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004113 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004114 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004115 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004116 napi->poll = poll;
Eric Dumazet82dc3c63c2013-03-05 15:57:22 +00004117 if (weight > NAPI_POLL_WEIGHT)
4118 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4119 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08004120 napi->weight = weight;
4121 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004122 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004123#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004124 spin_lock_init(&napi->poll_lock);
4125 napi->poll_owner = -1;
4126#endif
4127 set_bit(NAPI_STATE_SCHED, &napi->state);
4128}
4129EXPORT_SYMBOL(netif_napi_add);
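
/*
 * Usage example (sketch): NAPI registration is normally done once at
 * probe time, before register_netdev(); my_poll is the driver's poll
 * callback as sketched above.
 *
 *	netif_napi_add(dev, &ap->napi, my_poll, NAPI_POLL_WEIGHT);
 *	err = register_netdev(dev);
 *	...
 *	napi_enable(&ap->napi);		(typically from ndo_open)
 */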
4130
4131void netif_napi_del(struct napi_struct *napi)
4132{
4133 struct sk_buff *skb, *next;
4134
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004135 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004136 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004137
4138 for (skb = napi->gro_list; skb; skb = next) {
4139 next = skb->next;
4140 skb->next = NULL;
4141 kfree_skb(skb);
4142 }
4143
4144 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004145 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004146}
4147EXPORT_SYMBOL(netif_napi_del);
4148
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149static void net_rx_action(struct softirq_action *h)
4150{
Eric Dumazete326bed2010-04-22 00:22:45 -07004151 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004152 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004153 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004154 void *have;
4155
Linus Torvalds1da177e2005-04-16 15:20:36 -07004156 local_irq_disable();
4157
Eric Dumazete326bed2010-04-22 00:22:45 -07004158 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004159 struct napi_struct *n;
4160 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004162		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004163		 * Allow this to run for 2 jiffies, which allows
4164 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004165 */
Eric Dumazetd1f41b62013-03-05 07:15:13 +00004166 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167 goto softnet_break;
4168
4169 local_irq_enable();
4170
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004171 /* Even though interrupts have been re-enabled, this
4172 * access is safe because interrupts can only add new
4173 * entries to the tail of this list, and only ->poll()
4174 * calls can remove this head entry from the list.
4175 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004176 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004178 have = netpoll_poll_lock(n);
4179
4180 weight = n->weight;
4181
David S. Miller0a7606c2007-10-29 21:28:47 -07004182 /* This NAPI_STATE_SCHED test is for avoiding a race
4183 * with netpoll's poll_napi(). Only the entity which
4184 * obtains the lock and sees NAPI_STATE_SCHED set will
4185 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004186 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004187 */
4188 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004189 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004190 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004191 trace_napi_poll(n);
4192 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004193
4194 WARN_ON_ONCE(work > weight);
4195
4196 budget -= work;
4197
4198 local_irq_disable();
4199
4200 /* Drivers must not modify the NAPI state if they
4201 * consume the entire weight. In such cases this code
4202 * still "owns" the NAPI instance and therefore can
4203 * move the instance around on the list at-will.
4204 */
David S. Millerfed17f32008-01-07 21:00:40 -08004205 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004206 if (unlikely(napi_disable_pending(n))) {
4207 local_irq_enable();
4208 napi_complete(n);
4209 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004210 } else {
4211 if (n->gro_list) {
 4212				/* Flush packets that are too old.
4213 * If HZ < 1000, flush all packets.
4214 */
4215 local_irq_enable();
4216 napi_gro_flush(n, HZ >= 1000);
4217 local_irq_disable();
4218 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004219 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004220 }
David S. Millerfed17f32008-01-07 21:00:40 -08004221 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004222
4223 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 }
4225out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004226 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004227
Chris Leechdb217332006-06-17 21:24:58 -07004228#ifdef CONFIG_NET_DMA
4229 /*
4230 * There may not be any more sk_buffs coming right now, so push
4231 * any pending DMA copies to hardware
4232 */
Dan Williams2ba05622009-01-06 11:38:14 -07004233 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004234#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004235
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236 return;
4237
4238softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004239 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004240 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4241 goto out;
4242}
4243
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004244struct netdev_upper {
4245 struct net_device *dev;
4246 bool master;
4247 struct list_head list;
4248 struct rcu_head rcu;
4249 struct list_head search_list;
4250};
4251
4252static void __append_search_uppers(struct list_head *search_list,
4253 struct net_device *dev)
4254{
4255 struct netdev_upper *upper;
4256
4257 list_for_each_entry(upper, &dev->upper_dev_list, list) {
 4258		/* check that this upper is not already in the search list */
4259 if (list_empty(&upper->search_list))
4260 list_add_tail(&upper->search_list, search_list);
4261 }
4262}
4263
4264static bool __netdev_search_upper_dev(struct net_device *dev,
4265 struct net_device *upper_dev)
4266{
4267 LIST_HEAD(search_list);
4268 struct netdev_upper *upper;
4269 struct netdev_upper *tmp;
4270 bool ret = false;
4271
4272 __append_search_uppers(&search_list, dev);
4273 list_for_each_entry(upper, &search_list, search_list) {
4274 if (upper->dev == upper_dev) {
4275 ret = true;
4276 break;
4277 }
4278 __append_search_uppers(&search_list, upper->dev);
4279 }
4280 list_for_each_entry_safe(upper, tmp, &search_list, search_list)
4281 INIT_LIST_HEAD(&upper->search_list);
4282 return ret;
4283}
4284
4285static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
4286 struct net_device *upper_dev)
4287{
4288 struct netdev_upper *upper;
4289
4290 list_for_each_entry(upper, &dev->upper_dev_list, list) {
4291 if (upper->dev == upper_dev)
4292 return upper;
4293 }
4294 return NULL;
4295}
4296
4297/**
4298 * netdev_has_upper_dev - Check if device is linked to an upper device
4299 * @dev: device
4300 * @upper_dev: upper device to check
4301 *
 4302 * Find out if a device is linked to the specified upper device and return
 4303 * true if it is. Note that this checks only the immediate upper device,
 4304 * not the complete stack of devices. The caller must hold the RTNL lock.
4305 */
4306bool netdev_has_upper_dev(struct net_device *dev,
4307 struct net_device *upper_dev)
4308{
4309 ASSERT_RTNL();
4310
4311 return __netdev_find_upper(dev, upper_dev);
4312}
4313EXPORT_SYMBOL(netdev_has_upper_dev);
4314
4315/**
4316 * netdev_has_any_upper_dev - Check if device is linked to some device
4317 * @dev: device
4318 *
4319 * Find out if a device is linked to an upper device and return true in case
4320 * it is. The caller must hold the RTNL lock.
4321 */
4322bool netdev_has_any_upper_dev(struct net_device *dev)
4323{
4324 ASSERT_RTNL();
4325
4326 return !list_empty(&dev->upper_dev_list);
4327}
4328EXPORT_SYMBOL(netdev_has_any_upper_dev);
4329
4330/**
4331 * netdev_master_upper_dev_get - Get master upper device
4332 * @dev: device
4333 *
 4334 * Find a master upper device and return a pointer to it, or NULL if
 4335 * there is none. The caller must hold the RTNL lock.
4336 */
4337struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4338{
4339 struct netdev_upper *upper;
4340
4341 ASSERT_RTNL();
4342
4343 if (list_empty(&dev->upper_dev_list))
4344 return NULL;
4345
4346 upper = list_first_entry(&dev->upper_dev_list,
4347 struct netdev_upper, list);
4348 if (likely(upper->master))
4349 return upper->dev;
4350 return NULL;
4351}
4352EXPORT_SYMBOL(netdev_master_upper_dev_get);
4353
4354/**
4355 * netdev_master_upper_dev_get_rcu - Get master upper device
4356 * @dev: device
4357 *
 4358 * Find a master upper device and return a pointer to it, or NULL if
 4359 * there is none. The caller must hold the RCU read lock.
4360 */
4361struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4362{
4363 struct netdev_upper *upper;
4364
4365 upper = list_first_or_null_rcu(&dev->upper_dev_list,
4366 struct netdev_upper, list);
4367 if (upper && likely(upper->master))
4368 return upper->dev;
4369 return NULL;
4370}
4371EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4372
4373static int __netdev_upper_dev_link(struct net_device *dev,
4374 struct net_device *upper_dev, bool master)
4375{
4376 struct netdev_upper *upper;
4377
4378 ASSERT_RTNL();
4379
4380 if (dev == upper_dev)
4381 return -EBUSY;
4382
 4383	/* To prevent loops, check that dev is not an upper device of upper_dev. */
4384 if (__netdev_search_upper_dev(upper_dev, dev))
4385 return -EBUSY;
4386
4387 if (__netdev_find_upper(dev, upper_dev))
4388 return -EEXIST;
4389
4390 if (master && netdev_master_upper_dev_get(dev))
4391 return -EBUSY;
4392
4393 upper = kmalloc(sizeof(*upper), GFP_KERNEL);
4394 if (!upper)
4395 return -ENOMEM;
4396
4397 upper->dev = upper_dev;
4398 upper->master = master;
4399 INIT_LIST_HEAD(&upper->search_list);
4400
4401 /* Ensure that master upper link is always the first item in list. */
4402 if (master)
4403 list_add_rcu(&upper->list, &dev->upper_dev_list);
4404 else
4405 list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
4406 dev_hold(upper_dev);
Jiri Pirko42e52bf2013-05-25 04:12:10 +00004407 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004408 return 0;
4409}
4410
4411/**
4412 * netdev_upper_dev_link - Add a link to the upper device
4413 * @dev: device
4414 * @upper_dev: new upper device
4415 *
 4416 * Adds a link to a device which is upper to this one. The caller must hold
4417 * the RTNL lock. On a failure a negative errno code is returned.
4418 * On success the reference counts are adjusted and the function
4419 * returns zero.
4420 */
4421int netdev_upper_dev_link(struct net_device *dev,
4422 struct net_device *upper_dev)
4423{
4424 return __netdev_upper_dev_link(dev, upper_dev, false);
4425}
4426EXPORT_SYMBOL(netdev_upper_dev_link);
4427
4428/**
4429 * netdev_master_upper_dev_link - Add a master link to the upper device
4430 * @dev: device
4431 * @upper_dev: new upper device
4432 *
 4433 * Adds a link to a device which is upper to this one. In this case, only
4434 * one master upper device can be linked, although other non-master devices
4435 * might be linked as well. The caller must hold the RTNL lock.
4436 * On a failure a negative errno code is returned. On success the reference
4437 * counts are adjusted and the function returns zero.
4438 */
4439int netdev_master_upper_dev_link(struct net_device *dev,
4440 struct net_device *upper_dev)
4441{
4442 return __netdev_upper_dev_link(dev, upper_dev, true);
4443}
4444EXPORT_SYMBOL(netdev_master_upper_dev_link);
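
/*
 * Usage example (sketch): a bonding-style master links each slave under
 * RTNL when it is enslaved and unlinks it on release. bond_dev and
 * slave_dev are hypothetical.
 *
 *	ASSERT_RTNL();
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
 *	...
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);
 */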
4445
4446/**
4447 * netdev_upper_dev_unlink - Removes a link to upper device
4448 * @dev: device
 4449 * @upper_dev: upper device to unlink
4450 *
4451 * Removes a link to device which is upper to this one. The caller must hold
4452 * the RTNL lock.
4453 */
4454void netdev_upper_dev_unlink(struct net_device *dev,
4455 struct net_device *upper_dev)
4456{
4457 struct netdev_upper *upper;
4458
4459 ASSERT_RTNL();
4460
4461 upper = __netdev_find_upper(dev, upper_dev);
4462 if (!upper)
4463 return;
4464 list_del_rcu(&upper->list);
4465 dev_put(upper_dev);
4466 kfree_rcu(upper, rcu);
Jiri Pirko42e52bf2013-05-25 04:12:10 +00004467 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004468}
4469EXPORT_SYMBOL(netdev_upper_dev_unlink);
4470
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004471static void dev_change_rx_flags(struct net_device *dev, int flags)
4472{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004473 const struct net_device_ops *ops = dev->netdev_ops;
4474
4475 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4476 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004477}
4478
Wang Chendad9b332008-06-18 01:48:28 -07004479static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07004480{
Eric Dumazetb536db92011-11-30 21:42:26 +00004481 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004482 kuid_t uid;
4483 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07004484
Patrick McHardy24023452007-07-14 18:51:31 -07004485 ASSERT_RTNL();
4486
Wang Chendad9b332008-06-18 01:48:28 -07004487 dev->flags |= IFF_PROMISC;
4488 dev->promiscuity += inc;
4489 if (dev->promiscuity == 0) {
4490 /*
4491 * Avoid overflow.
 4492		 * If inc causes overflow, leave promiscuity untouched and return an error.
4493 */
4494 if (inc < 0)
4495 dev->flags &= ~IFF_PROMISC;
4496 else {
4497 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004498			pr_warn("%s: promiscuity counter overflowed, set promiscuity failed. promiscuity feature of device might be broken.\n",
4499 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07004500 return -EOVERFLOW;
4501 }
4502 }
Patrick McHardy4417da62007-06-27 01:28:10 -07004503 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004504 pr_info("device %s %s promiscuous mode\n",
4505 dev->name,
4506 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11004507 if (audit_enabled) {
4508 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05004509 audit_log(current->audit_context, GFP_ATOMIC,
4510 AUDIT_ANOM_PROMISCUOUS,
4511 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4512 dev->name, (dev->flags & IFF_PROMISC),
4513 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07004514 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004515 from_kuid(&init_user_ns, uid),
4516 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05004517 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11004518 }
Patrick McHardy24023452007-07-14 18:51:31 -07004519
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004520 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07004521 }
Wang Chendad9b332008-06-18 01:48:28 -07004522 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07004523}
4524
Linus Torvalds1da177e2005-04-16 15:20:36 -07004525/**
4526 * dev_set_promiscuity - update promiscuity count on a device
4527 * @dev: device
4528 * @inc: modifier
4529 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07004530 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531 * remains above zero the interface remains promiscuous. Once it hits zero
 4532 * the device reverts to normal filtering operation. A negative inc
4533 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07004534 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004535 */
Wang Chendad9b332008-06-18 01:48:28 -07004536int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004537{
Eric Dumazetb536db92011-11-30 21:42:26 +00004538 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07004539 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004540
Wang Chendad9b332008-06-18 01:48:28 -07004541 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07004542 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07004543 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07004544 if (dev->flags != old_flags)
4545 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07004546 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004547}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004548EXPORT_SYMBOL(dev_set_promiscuity);
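
/*
 * Usage example (sketch): packet-capture users take a promiscuity
 * reference while capturing and drop it when done, under RTNL; the
 * counter keeps overlapping users from switching promiscuous mode off
 * underneath each other.
 *
 *	dev_set_promiscuity(dev, 1);
 *	... capture traffic ...
 *	dev_set_promiscuity(dev, -1);
 */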
Linus Torvalds1da177e2005-04-16 15:20:36 -07004549
4550/**
4551 * dev_set_allmulti - update allmulti count on a device
4552 * @dev: device
4553 * @inc: modifier
4554 *
 4555 * Add or remove reception of all multicast frames on a device. While the
 4556 * count in the device remains above zero the interface remains listening
 4557 * to all multicast frames. Once it hits zero the device reverts to normal
 4558 * filtering operation. A negative @inc value is used to drop the counter
4559 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07004560 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004561 */
4562
Wang Chendad9b332008-06-18 01:48:28 -07004563int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564{
Eric Dumazetb536db92011-11-30 21:42:26 +00004565 unsigned int old_flags = dev->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004566
Patrick McHardy24023452007-07-14 18:51:31 -07004567 ASSERT_RTNL();
4568
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07004570 dev->allmulti += inc;
4571 if (dev->allmulti == 0) {
4572 /*
4573 * Avoid overflow.
 4574		 * If inc causes overflow, leave allmulti untouched and return an error.
4575 */
4576 if (inc < 0)
4577 dev->flags &= ~IFF_ALLMULTI;
4578 else {
4579 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004580			pr_warn("%s: allmulti counter overflowed, set allmulti failed. allmulti feature of device might be broken.\n",
4581 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07004582 return -EOVERFLOW;
4583 }
4584 }
Patrick McHardy24023452007-07-14 18:51:31 -07004585 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004586 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07004587 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07004588 }
Wang Chendad9b332008-06-18 01:48:28 -07004589 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07004590}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004591EXPORT_SYMBOL(dev_set_allmulti);
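
/*
 * Usage example (sketch): a user that must see every multicast frame,
 * e.g. a routing daemon, holds an allmulti reference in the same
 * counted fashion, under RTNL:
 *
 *	dev_set_allmulti(dev, 1);
 *	...
 *	dev_set_allmulti(dev, -1);
 */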
Patrick McHardy4417da62007-06-27 01:28:10 -07004592
4593/*
4594 * Upload unicast and multicast address lists to device and
4595 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08004596 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07004597 * are present.
4598 */
4599void __dev_set_rx_mode(struct net_device *dev)
4600{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004601 const struct net_device_ops *ops = dev->netdev_ops;
4602
Patrick McHardy4417da62007-06-27 01:28:10 -07004603 /* dev_open will call this function so the list will stay sane. */
4604 if (!(dev->flags&IFF_UP))
4605 return;
4606
4607 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09004608 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07004609
Jiri Pirko01789342011-08-16 06:29:00 +00004610 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004611 /* Unicast addresses changes may only happen under the rtnl,
4612 * therefore calling __dev_set_promiscuity here is safe.
4613 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08004614 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004615 __dev_set_promiscuity(dev, 1);
Joe Perches2d348d12011-07-25 16:17:35 -07004616 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08004617 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004618 __dev_set_promiscuity(dev, -1);
Joe Perches2d348d12011-07-25 16:17:35 -07004619 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07004620 }
Patrick McHardy4417da62007-06-27 01:28:10 -07004621 }
Jiri Pirko01789342011-08-16 06:29:00 +00004622
4623 if (ops->ndo_set_rx_mode)
4624 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004625}
4626
4627void dev_set_rx_mode(struct net_device *dev)
4628{
David S. Millerb9e40852008-07-15 00:15:08 -07004629 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004630 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07004631 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632}
4633
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004634/**
4635 * dev_get_flags - get flags reported to userspace
4636 * @dev: device
4637 *
4638 * Get the combination of flag bits exported through APIs to userspace.
4639 */
Eric Dumazet95c96172012-04-15 05:58:06 +00004640unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641{
Eric Dumazet95c96172012-04-15 05:58:06 +00004642 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643
4644 flags = (dev->flags & ~(IFF_PROMISC |
4645 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004646 IFF_RUNNING |
4647 IFF_LOWER_UP |
4648 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649 (dev->gflags & (IFF_PROMISC |
4650 IFF_ALLMULTI));
4651
Stefan Rompfb00055a2006-03-20 17:09:11 -08004652 if (netif_running(dev)) {
4653 if (netif_oper_up(dev))
4654 flags |= IFF_RUNNING;
4655 if (netif_carrier_ok(dev))
4656 flags |= IFF_LOWER_UP;
4657 if (netif_dormant(dev))
4658 flags |= IFF_DORMANT;
4659 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660
4661 return flags;
4662}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004663EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664
Patrick McHardybd380812010-02-26 06:34:53 +00004665int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666{
Eric Dumazetb536db92011-11-30 21:42:26 +00004667 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004668 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669
Patrick McHardy24023452007-07-14 18:51:31 -07004670 ASSERT_RTNL();
4671
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672 /*
4673 * Set the flags on our device.
4674 */
4675
4676 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4677 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4678 IFF_AUTOMEDIA)) |
4679 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4680 IFF_ALLMULTI));
4681
4682 /*
 4683	 * Load in the correct multicast list now that the flags have changed.
4684 */
4685
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004686 if ((old_flags ^ flags) & IFF_MULTICAST)
4687 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004688
Patrick McHardy4417da62007-06-27 01:28:10 -07004689 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690
4691 /*
 4692	 * Have we downed the interface? We handle IFF_UP ourselves
4693 * according to user attempts to set it, rather than blindly
4694 * setting it.
4695 */
4696
4697 ret = 0;
4698 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00004699 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700
4701 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004702 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703 }
4704
Linus Torvalds1da177e2005-04-16 15:20:36 -07004705 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004706 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4707
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708 dev->gflags ^= IFF_PROMISC;
4709 dev_set_promiscuity(dev, inc);
4710 }
4711
 4712	/* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 4713	   is important. Some (broken) drivers set IFF_PROMISC when IFF_ALLMULTI
 4714	   is requested, without asking us and without reporting it.
4715 */
4716 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004717 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4718
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719 dev->gflags ^= IFF_ALLMULTI;
4720 dev_set_allmulti(dev, inc);
4721 }
4722
Patrick McHardybd380812010-02-26 06:34:53 +00004723 return ret;
4724}
4725
4726void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4727{
4728 unsigned int changes = dev->flags ^ old_flags;
4729
4730 if (changes & IFF_UP) {
4731 if (dev->flags & IFF_UP)
4732 call_netdevice_notifiers(NETDEV_UP, dev);
4733 else
4734 call_netdevice_notifiers(NETDEV_DOWN, dev);
4735 }
4736
4737 if (dev->flags & IFF_UP &&
4738 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4739 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4740}
4741
4742/**
4743 * dev_change_flags - change device settings
4744 * @dev: device
4745 * @flags: device state flags
4746 *
 4747 * Change settings on a device based on state flags. The flags are
4748 * in the userspace exported format.
4749 */
Eric Dumazetb536db92011-11-30 21:42:26 +00004750int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00004751{
Eric Dumazetb536db92011-11-30 21:42:26 +00004752 int ret;
4753 unsigned int changes, old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004754
4755 ret = __dev_change_flags(dev, flags);
4756 if (ret < 0)
4757 return ret;
4758
4759 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07004760 if (changes)
4761 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004762
Patrick McHardybd380812010-02-26 06:34:53 +00004763 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004764 return ret;
4765}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004766EXPORT_SYMBOL(dev_change_flags);
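
/*
 * Usage example (sketch): bringing an interface administratively up is
 * just a flag change through this interface, always under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 */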
Linus Torvalds1da177e2005-04-16 15:20:36 -07004767
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004768/**
4769 * dev_set_mtu - Change maximum transfer unit
4770 * @dev: device
4771 * @new_mtu: new transfer unit
4772 *
4773 * Change the maximum transfer size of the network device.
4774 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775int dev_set_mtu(struct net_device *dev, int new_mtu)
4776{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004777 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778 int err;
4779
4780 if (new_mtu == dev->mtu)
4781 return 0;
4782
 4783	/* MTU must not be negative. */
4784 if (new_mtu < 0)
4785 return -EINVAL;
4786
4787 if (!netif_device_present(dev))
4788 return -ENODEV;
4789
4790 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004791 if (ops->ndo_change_mtu)
4792 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004793 else
4794 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004795
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00004796 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004797 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798 return err;
4799}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004800EXPORT_SYMBOL(dev_set_mtu);
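
/*
 * Usage example (sketch): switching a device to jumbo frames. The value
 * 9000 is illustrative; the driver's ndo_change_mtu may still reject it.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */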
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004802/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00004803 * dev_set_group - Change group this device belongs to
4804 * @dev: device
4805 * @new_group: group this device should belong to
4806 */
4807void dev_set_group(struct net_device *dev, int new_group)
4808{
4809 dev->group = new_group;
4810}
4811EXPORT_SYMBOL(dev_set_group);
4812
4813/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004814 * dev_set_mac_address - Change Media Access Control Address
4815 * @dev: device
4816 * @sa: new address
4817 *
4818 * Change the hardware (MAC) address of the device
4819 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004820int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4821{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004822 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004823 int err;
4824
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004825 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004826 return -EOPNOTSUPP;
4827 if (sa->sa_family != dev->type)
4828 return -EINVAL;
4829 if (!netif_device_present(dev))
4830 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004831 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00004832 if (err)
4833 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00004834 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00004835 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04004836 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00004837 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004838}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004839EXPORT_SYMBOL(dev_set_mac_address);
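
/*
 * Usage example (sketch): the address is passed as a struct sockaddr
 * whose sa_family must match dev->type (ARPHRD_ETHER for Ethernet).
 * new_mac is assumed to hold a valid address of dev->addr_len bytes.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */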
Linus Torvalds1da177e2005-04-16 15:20:36 -07004840
Jiri Pirko4bf84c32012-12-27 23:49:37 +00004841/**
4842 * dev_change_carrier - Change device carrier
4843 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00004844 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00004845 *
4846 * Change device carrier
4847 */
4848int dev_change_carrier(struct net_device *dev, bool new_carrier)
4849{
4850 const struct net_device_ops *ops = dev->netdev_ops;
4851
4852 if (!ops->ndo_change_carrier)
4853 return -EOPNOTSUPP;
4854 if (!netif_device_present(dev))
4855 return -ENODEV;
4856 return ops->ndo_change_carrier(dev, new_carrier);
4857}
4858EXPORT_SYMBOL(dev_change_carrier);
4859
Linus Torvalds1da177e2005-04-16 15:20:36 -07004860/**
4861 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004862 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863 *
4864 * Returns a suitable unique value for a new device interface
4865 * number. The caller must hold the rtnl semaphore or the
4866 * dev_base_lock to be sure it remains unique.
4867 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004868static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00004870 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871 for (;;) {
4872 if (++ifindex <= 0)
4873 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004874 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00004875 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004876 }
4877}
4878
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004880static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004881
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004882static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004883{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004885}
4886
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004887static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004888{
Krishna Kumare93737b2009-12-08 22:26:02 +00004889 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004890
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004891 BUG_ON(dev_boot_phase);
4892 ASSERT_RTNL();
4893
Krishna Kumare93737b2009-12-08 22:26:02 +00004894 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004895		/* Some devices call this without ever having registered,
Krishna Kumare93737b2009-12-08 22:26:02 +00004896		 * to unwind a failed initialization. Remove those
 4897		 * devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004898 */
4899 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00004900 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
4901 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004902
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004903 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00004904 list_del(&dev->unreg_list);
4905 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004906 }
Eric Dumazet449f4542011-05-19 12:24:16 +00004907 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004908 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00004909 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004910
Octavian Purdila44345722010-12-13 12:44:07 +00004911 /* If device is running, close it first. */
4912 dev_close_many(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004913
Octavian Purdila44345722010-12-13 12:44:07 +00004914 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004915 /* And unlink it from device chain. */
4916 unlist_netdevice(dev);
4917
4918 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004919 }
4920
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004921 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004922
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004923 list_for_each_entry(dev, head, unreg_list) {
4924 /* Shutdown queueing discipline. */
4925 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004926
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004927
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004928		/* Notify protocols that we are about to destroy
 4929		   this device. They should clean up all of their state.
4930 */
4931 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4932
Patrick McHardya2835762010-02-26 06:34:51 +00004933 if (!dev->rtnl_link_ops ||
4934 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4935 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4936
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004937 /*
4938 * Flush the unicast and multicast chains
4939 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00004940 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004941 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004942
4943 if (dev->netdev_ops->ndo_uninit)
4944 dev->netdev_ops->ndo_uninit(dev);
4945
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004946 /* Notifier chain MUST detach us all upper devices. */
4947 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004948
4949 /* Remove entries from kobject tree */
4950 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00004951#ifdef CONFIG_XPS
4952 /* Remove XPS queueing entries */
4953 netif_reset_xps_queues_gt(dev, 0);
4954#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004955 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004956
Eric W. Biederman850a5452011-10-13 22:25:23 +00004957 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004958
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004959 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004960 dev_put(dev);
4961}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004962
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004963static void rollback_registered(struct net_device *dev)
4964{
4965 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004966
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004967 list_add(&dev->unreg_list, &single);
4968 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00004969 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004970}
4971
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004972static netdev_features_t netdev_fix_features(struct net_device *dev,
4973 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07004974{
Michał Mirosław57422dc2011-01-22 12:14:12 +00004975 /* Fix illegal checksum combinations */
4976 if ((features & NETIF_F_HW_CSUM) &&
4977 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04004978 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00004979 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4980 }
4981
Herbert Xub63365a2008-10-23 01:11:29 -07004982 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00004983 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04004984 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00004985 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07004986 }
4987
Pravin B Shelarec5f0612013-03-07 09:28:01 +00004988 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
4989 !(features & NETIF_F_IP_CSUM)) {
4990 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
4991 features &= ~NETIF_F_TSO;
4992 features &= ~NETIF_F_TSO_ECN;
4993 }
4994
4995 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
4996 !(features & NETIF_F_IPV6_CSUM)) {
4997 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
4998 features &= ~NETIF_F_TSO6;
4999 }
5000
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005001 /* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* UFO needs SG and checksumming */
	if (features & NETIF_F_UFO) {
		/* maybe split UFO into V4 and V6? */
		if (!((features & NETIF_F_GEN_CSUM) ||
		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	netdev_features_t features;
	int err = 0;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	if (dev->features == features)
		return 0;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		return -1;
	}

	if (!err)
		dev->features = features;

	return 1;
}

/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
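
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * that discovers at runtime that an offload can no longer be used may clear
 * the bit from dev->hw_features and ask the core to recompute, under RTNL.
 * The foo_disable_tso() name is illustrative only.
 *
 *	static void foo_disable_tso(struct net_device *dev)
 *	{
 *		rtnl_lock();
 *		dev->hw_features &= ~NETIF_F_TSO;
 *		netdev_update_features(dev);
 *		rtnl_unlock();
 *	}
 */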

/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
					struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
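
/*
 * Usage sketch (hypothetical stacked-device driver, not part of this file):
 * a virtual device layered on a real one typically mirrors the lower
 * device's state from its NETDEV_CHANGE notifier handling; "real_dev" and
 * "vdev" are illustrative names.
 *
 *	netif_stacked_transfer_operstate(real_dev, vdev);
 */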

#ifdef CONFIG_RPS
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;

	BUG_ON(count < 1);

	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;

	BUG_ON(count < 1);

	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	dev->iflink = -1;

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;

	/* Turn on no cache copy if HW is doing checksum */
	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
		if (dev->features & NETIF_F_ALL_CSUM) {
			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
			dev->features |= NETIF_F_NOCACHE_COPY;
		}
	}

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
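
/*
 * Usage sketch (hypothetical driver, not part of this file): a driver that
 * must fan several hardware channels into one poller can embed a dummy
 * netdev purely as a NAPI anchor; "foo_adapter" and foo_poll() are
 * illustrative names.
 *
 *	struct foo_adapter {
 *		struct net_device napi_dev;
 *		struct napi_struct napi;
 *	};
 *
 *	init_dummy_netdev(&adapter->napi_dev);
 *	netif_napi_add(&adapter->napi_dev, &adapter->napi, foo_poll, 64);
 */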


/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
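
/*
 * Usage sketch (hypothetical probe path, not part of this file): the usual
 * pattern pairs alloc_etherdev()/register_netdev() on the way up with
 * free_netdev() on failure; struct foo_priv and foo_netdev_ops are
 * illustrative names.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_etherdev(sizeof(struct foo_priv));
 *		if (!dev)
 *			return -ENOMEM;
 *		dev->netdev_ops = &foo_netdev_ops;
 *		err = register_netdev(dev);
 *		if (err) {
 *			free_netdev(dev);
 *			return err;
 *		}
 *		return 0;
 *	}
 */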

int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/**
 *	netdev_wait_allrefs - wait until all references are gone.
 *	@dev: target net_device
 *
 *	This is called when unregistering network devices.
 *
 *	Any protocol or device that holds a reference should register
 *	for netdevice notification, and cleanup and put back the
 *	reference if they receive an UNREGISTER event.
 *	We can get stuck here if buggy protocols don't correctly
 *	call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();


	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		rtnl_lock();
		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
		__rtnl_unlock();

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
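
/*
 * Usage sketch (hypothetical driver, not part of this file): a driver that
 * keeps 64-bit counters supplies ndo_get_stats64 and fills @storage itself,
 * so dev_get_stats() never falls back to dev->stats for it; struct foo_priv
 * is an illustrative type.
 *
 *	static struct rtnl_link_stats64 *
 *	foo_get_stats64(struct net_device *dev,
 *			struct rtnl_link_stats64 *storage)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		storage->rx_packets = priv->rx_packets;
 *		storage->tx_packets = priv->tx_packets;
 *		return storage;
 *	}
 */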

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

/**
 *	alloc_netdev_mqs - allocate network device
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
 *	@txqs:		the number of TX subqueues to allocate
 *	@rxqs:		the number of RX subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization. Also allocates subqueue structs
 *	for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_RPS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->upper_dev_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_RPS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
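
/*
 * Usage sketch (not part of this file): a multiqueue Ethernet-style device
 * with eight TX and eight RX queues can be allocated directly through this
 * helper; ether_setup() is the usual @setup callback and the "%d" in the
 * name lets the core pick the unit number. struct foo_priv is illustrative.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */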

/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
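
/*
 * Usage sketch (hypothetical remove path, not part of this file): teardown
 * mirrors the probe sketch above; unregister through the rtnl-taking
 * wrapper, then release the memory once the core has dropped its
 * references.
 *
 *	static void foo_remove(struct net_device *dev)
 *	{
 *		unregister_netdev(dev);
 *		free_netdev(dev);
 *	}
 */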

/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}


/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
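
/*
 * Usage sketch (not part of this file): an aggregating device in the style
 * of bonding can fold each slave's feature set into its own by iterating
 * this helper, starting from the mask of features it is willing to keep;
 * "slaves" and the list layout are illustrative.
 *
 *	netdev_features_t features = mask;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */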

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent) {
		r = dev_printk_emit(level[1] - '0',
				    dev->dev.parent,
				    "%s %s %s: %pV",
				    dev_driver_string(dev->dev.parent),
				    dev_name(dev->dev.parent),
				    netdev_name(dev), vaf);
	} else if (dev) {
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	} else {
		r = printk("%s(NULL net_device): %pV", level, vaf);
	}

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	int r;							\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	r = __netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
								\
	return r;						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
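
/*
 * Usage sketch (not part of this file): the helpers generated above take a
 * printk-style format string and prefix the message with the driver and
 * device names when a parent device is known, e.g.:
 *
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 *	netdev_warn(dev, "hardware checksum failure\n");
 */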

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
		sd->flow_limit = NULL;
#endif
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must be
	 * present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);