/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

extern void netdev_set_default_ethtool_ops(struct net_device *dev,
					   const struct ethtool_ops *ops);
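/*
 * Illustrative sketch (not part of this header): a driver probe routine
 * typically wires up its ethtool_ops like this; "mydrv_ethtool_ops" and
 * "mydrv_probe" are hypothetical driver-defined names.
 *
 *	static const struct ethtool_ops mydrv_ethtool_ops = {
 *		.get_link = ethtool_op_get_link,
 *	};
 *
 *	static int mydrv_probe(struct net_device *dev)
 *	{
 *		dev->ethtool_ops = &mydrv_ethtool_ops;
 *		return 0;
 *	}
 */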
/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from other device */
#define NET_ADDR_SET		3	/* address is set using
					 * dev_set_mac_address() */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_POLICED	0x03	/* skb is shot by police */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
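/*
 * Illustrative sketch (not part of this header): a tunnel or other virtual
 * device that re-queues an skb can use net_xmit_eval() to fold the
 * congestion-notification code into success before reporting an errno to
 * its own caller:
 *
 *	err = dev_queue_xmit(skb);
 *	err = net_xmit_eval(err);	// NET_XMIT_CN is not an error
 */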
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
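/*
 * Illustrative sketch (not part of this header): queue-drain logic uses
 * dev_xmit_complete() to decide whether the driver consumed the skb or the
 * transmit must be retried ("txq" and "requeue_skb" are hypothetical):
 *
 *	rc = ops->ndo_start_xmit(skb, dev);
 *	if (dev_xmit_complete(rc))
 *		txq->trans_start = jiffies;	// skb is gone, driver owns it
 *	else
 *		requeue_skb(skb);		// hypothetical: try again later
 */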
/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


#include <linux/cache.h>
#include <linux/skbuff.h>
#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
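/*
 * Illustrative sketch (not part of this header): a driver's
 * ndo_set_rx_mode() implementation typically walks the multicast list
 * with these helpers ("mydrv_hash_addr" is a hypothetical routine that
 * programs the hardware filter):
 *
 *	struct netdev_hw_addr *ha;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		mydrv_hash_addr(priv, ha->addr);
 */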
struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
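/*
 * Illustrative sketch (not part of this header): code building an outgoing
 * packet reserves aligned link-layer headroom up front, so the hardware
 * header can later be prepended without reallocating ("payload_len" is a
 * hypothetical local):
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */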
struct header_ops {
	int	(*create)(struct sk_buff *skb, struct net_device *dev,
			  unsigned short type, const void *daddr,
			  const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling, similar to a tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};
enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;
/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
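/*
 * Illustrative sketch (not part of this header): a minimal rx_handler that
 * steals every frame for private processing ("mydrv_consume" is a
 * hypothetical helper). Note the double pointer: a handler may replace the
 * skb it was handed.
 *
 *	static rx_handler_result_t mydrv_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		mydrv_consume(skb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 */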
extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running. This is used as a condition variable to
 * ensure only one NAPI poll instance runs. We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
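/*
 * Illustrative sketch (not part of this header): the canonical caller of
 * napi_schedule() is a device interrupt handler, which masks its own
 * interrupts and defers the real work to the poll routine
 * ("mydrv_disable_irqs" and "struct mydrv_priv" are hypothetical):
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			mydrv_disable_irqs(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */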
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);
/**
 *	napi_by_id - lookup a NAPI by napi_id
 *	@napi_id: hashed napi_id
 *
 * Look up @napi_id in the napi_hash table;
 * must be called under rcu_read_lock().
 */
extern struct napi_struct *napi_by_id(unsigned int napi_id);

/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: napi context
 *
 * Generate a new napi_id and store @napi under it in napi_hash.
 */
extern void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: napi context
 *
 * Warning: caller must observe an RCU grace period
 * before freeing the memory containing @napi.
 */
extern void napi_hash_del(struct napi_struct *napi);
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
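/*
 * Illustrative sketch (not part of this header): drivers pair these in
 * their open/stop paths so the poll routine can never run on a downed
 * interface ("mydrv_open"/"mydrv_stop" and "priv" are hypothetical):
 *
 *	static int mydrv_open(struct net_device *dev)
 *	{
 *		napi_enable(&priv->napi);
 *		...
 *	}
 *
 *	static int mydrv_stop(struct net_device *dev)
 *	{
 *		napi_disable(&priv->napi);
 *		...
 *	}
 */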
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					(1 << __QUEUE_STATE_FROZEN))
};
/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits is set in the state). Drivers should not need to call
 * the netif_xmit_*stopped functions; they should only be using netif_tx_*.
 */
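/*
 * Illustrative sketch (not part of this header): a driver's xmit path sets
 * __QUEUE_STATE_DRV_XOFF through the netif_tx_* helpers (declared later in
 * this header) when its descriptor ring fills, and clears it from the
 * completion path ("mydrv_ring_full"/"mydrv_ring_has_room" are
 * hypothetical ring checks):
 *
 *	if (mydrv_ring_full(priv))
 *		netif_tx_stop_queue(txq);
 *	...
 *	if (mydrv_ring_has_room(priv))
 *		netif_tx_wake_queue(txq);
 */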
struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}
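/*
 * Illustrative sketch (not part of this header): a socket's receive path
 * records the current CPU for the flow's hash so that RFS can steer future
 * packets of that flow to the CPU running the consumer:
 *
 *	struct rps_sock_flow_table *sft;
 *
 *	rcu_read_lock();
 *	sft = rcu_dereference(rps_sock_flow_table);
 *	rps_record_sock_flow(sft, sk->sk_rxhash);
 *	rcu_read_unlock();
 */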
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_PORT_ID_LEN 32

/* This structure holds a unique identifier to identify the
 * physical port used by a netdevice.
 */
struct netdev_phys_port_id {
	unsigned char id[MAX_PHYS_PORT_ID_LEN];
	unsigned char id_len;
};
/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when device changes address list filtering.
 *	If driver handles unicast address filtering, it should set
 *	IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return
 *	not supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network devices bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                      struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *	Called to setup 'tc' number of traffic classes in the net device. This
 *	is always called from the stack with the rtnl lock held and netif tx
 *	queues stopped. This allows the netdevice to perform queue management
 *	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information (FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask)
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_port_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on single physical port.
 *
 * void (*ndo_add_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify a driver about the UDP port and socket
 *	address family that vxlan is listening to. It is called only when
 *	a new port starts listening. The operation is protected by the
 *	vxlan_net->sock_lock.
 *
 * void (*ndo_del_vxlan_port)(struct net_device *dev,
 *			      sa_family_t sa_family, __be16 port);
 *	Called by vxlan to notify the driver about a UDP port and socket
 *	address family that vxlan is not listening to anymore. The operation
 *	is protected by the vxlan_net->sock_lock.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout)(struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info,
						     gfp_t gfp);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	int			(*ndo_busy_poll)(struct napi_struct *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						int idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_port_id *ppid);
	void			(*ndo_add_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);
	void			(*ndo_del_vxlan_port)(struct net_device *dev,
						      sa_family_t sa_family,
						      __be16 port);
};
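/*
 * Illustrative sketch (not part of this header): a simple driver fills in
 * only the hooks it needs and leaves the rest NULL ("mydrv_*" are
 * hypothetical driver functions; eth_mac_addr/eth_validate_addr are the
 * stock Ethernet helpers):
 *
 *	static const struct net_device_ops mydrv_netdev_ops = {
 *		.ndo_open		= mydrv_open,
 *		.ndo_stop		= mydrv_stop,
 *		.ndo_start_xmit		= mydrv_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *
 *	dev->netdev_ops = &mydrv_netdev_ops;
 */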
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001102/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103 * The DEVICE structure.
1104 * Actually, this whole structure is a big mistake. It mixes I/O
1105 * data with strictly "high-level" data, and it has to know about
1106 * almost every data structure used in the INET module.
1107 *
1108 * FIXME: cleanup struct net_device such that network protocol info
1109 * moves out.
1110 */
1111
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001112struct net_device {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113
1114 /*
1115 * This is the first field of the "visible" part of this structure
1116 * (i.e. as seen by users in the "Space.c" file). It is the name
Justin P. Mattock724df612010-05-26 09:22:40 -07001117 * of the interface.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118 */
1119 char name[IFNAMSIZ];
Mark Grossed771342010-05-06 01:59:26 +02001120
Eric Dumazet91364612012-06-11 06:36:13 +00001121 /* device name hash chain, please keep it close to name[] */
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001122 struct hlist_node name_hlist;
Eric Dumazet91364612012-06-11 06:36:13 +00001123
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001124 /* snmp alias */
1125 char *ifalias;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126
1127 /*
1128 * I/O specific fields
1129 * FIXME: Merge these and struct ifmap into one
1130 */
1131 unsigned long mem_end; /* shared mem end */
1132 unsigned long mem_start; /* shared mem start */
1133 unsigned long base_addr; /* device I/O address */
1134 unsigned int irq; /* device IRQ number */
1135
1136 /*
1137 * Some hardware also needs these fields, but they are not
1138 * part of the usual set specified in Space.c.
1139 */
1140
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141 unsigned long state;
1142
Pavel Emelianov7562f872007-05-03 15:13:45 -07001143 struct list_head dev_list;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001144 struct list_head napi_list;
Eric Dumazet44a08732009-10-27 07:03:04 +00001145 struct list_head unreg_list;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02001146
1147 /* directly linked devices, like slaves for bonding */
1148 struct {
1149 struct list_head upper;
1150 struct list_head lower;
1151 } adj_list;
1152
1153 /* all linked devices, *including* neighbours */
1154 struct {
1155 struct list_head upper;
1156 struct list_head lower;
1157 } all_adj_list;
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001158
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159
Michał Mirosław5455c692011-02-15 16:59:17 +00001160 /* currently active device features */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001161 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00001162 /* user-changeable features */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001163 netdev_features_t hw_features;
Michał Mirosław5455c692011-02-15 16:59:17 +00001164 /* user-requested features */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001165 netdev_features_t wanted_features;
Michał Mirosław1aac6262011-04-12 04:07:39 +00001166 /* mask of features inheritable by VLAN devices */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001167 netdev_features_t vlan_features;
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00001168 /* mask of features inherited by encapsulating devices.
1169 * This field indicates what encapsulation offloads
1170 * the hardware is capable of doing, and drivers will
1171 * need to set them appropriately.
1172 */
1173 netdev_features_t hw_enc_features;
Simon Horman0d89d202013-05-23 21:02:52 +00001174 /* mask of features inheritable by MPLS */
1175 netdev_features_t mpls_features;
Michał Mirosław04ed3e72011-01-24 15:32:47 -08001176
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177 /* Interface index. Unique device identifier */
1178 int ifindex;
1179 int iflink;
1180
Rusty Russellc45d2862007-03-28 14:29:08 -07001181 struct net_device_stats stats;
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001182 atomic_long_t rx_dropped; /* packets dropped by the core network.
1183 * Do not use this in drivers.
1184 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185
Johannes Bergb86e0282007-04-26 20:48:23 -07001186#ifdef CONFIG_WIRELESS_EXT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187 /* List of functions to handle Wireless Extensions (instead of ioctl).
1188 * See <net/iw_handler.h> for details. Jean II */
1189 const struct iw_handler_def * wireless_handlers;
1190 /* Instance data managed by the core of Wireless Extensions. */
1191 struct iw_public_data * wireless_data;
Johannes Bergb86e0282007-04-26 20:48:23 -07001192#endif
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001193 /* Management operations */
1194 const struct net_device_ops *netdev_ops;
Stephen Hemminger76fd8592006-09-08 11:16:13 -07001195 const struct ethtool_ops *ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001197 /* Hardware header description */
1198 const struct header_ops *header_ops;
1199
Stefan Rompfb00055a2006-03-20 17:09:11 -08001200 unsigned int flags; /* interface flags (a la BSD) */
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001201 unsigned int priv_flags; /* Like 'flags' but invisible to userspace.
1202 * See if.h for definitions. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 unsigned short gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 unsigned short padded; /* How much padding added by alloc_netdev() */
1205
Stefan Rompfb00055a2006-03-20 17:09:11 -08001206 unsigned char operstate; /* RFC2863 operstate */
1207 unsigned char link_mode; /* mapping policy to operstate */
1208
Joe Perchesbdc220d2011-05-09 17:42:46 +00001209 unsigned char if_port; /* Selectable AUI, TP,..*/
1210 unsigned char dma; /* DMA channel */
1211
David S. Millercd7b5392010-05-02 22:27:59 -07001212 unsigned int mtu; /* interface MTU value */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213 unsigned short type; /* interface hardware type */
1214 unsigned short hard_header_len; /* hardware hdr length */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215
Johannes Bergf5184d22008-05-12 20:48:31 -07001216 /* extra head- and tailroom the hardware may need; this cannot be
 1217 * guaranteed in all cases, especially for tailroom. Some cases also use
1218 * LL_MAX_HEADER instead to allocate the skb.
1219 */
1220 unsigned short needed_headroom;
1221 unsigned short needed_tailroom;
1222
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 /* Interface address info. */
Jon Wetzela6f9a702005-08-20 17:15:54 -07001224 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
Stefan Assmannc1f79422010-07-22 02:50:21 +00001225 unsigned char addr_assign_type; /* hw address assignment type */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 unsigned char addr_len; /* hardware address length */
David Miller596b9b62011-07-25 00:01:25 +00001227 unsigned char neigh_priv_len;
Narendra Kdffebd22013-06-10 19:34:03 +05301228 unsigned short dev_id; /* Used to differentiate devices
1229 * that share the same link
1230 * layer address
1231 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00001232 spinlock_t addr_list_lock;
Jiri Pirko22bedad32010-04-01 21:22:57 +00001233 struct netdev_hw_addr_list uc; /* Unicast mac addresses */
1234 struct netdev_hw_addr_list mc; /* Multicast mac addresses */
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001235 struct netdev_hw_addr_list dev_addrs; /* list of device
1236 * hw addresses
1237 */
1238#ifdef CONFIG_SYSFS
1239 struct kset *queues_kset;
1240#endif
1241
Joe Perches2d348d12011-07-25 16:17:35 -07001242 bool uc_promisc;
Wang Chen9d45abe2008-06-17 21:12:48 -07001243 unsigned int promiscuity;
1244 unsigned int allmulti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246
1247 /* Protocol specific pointers */
Jesse Gross65ac6a52010-10-20 13:56:05 +00001248
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001249#if IS_ENABLED(CONFIG_VLAN_8021Q)
Jiri Pirko5b9ea6e2011-12-08 04:11:18 +00001250 struct vlan_info __rcu *vlan_info; /* VLAN info */
Jesse Gross65ac6a52010-10-20 13:56:05 +00001251#endif
Ben Hutchings34a430d2011-11-25 14:38:38 +00001252#if IS_ENABLED(CONFIG_NET_DSA)
Ben Hutchingscf50dcc2011-11-25 14:32:52 +00001253 struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00001254#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 void *atalk_ptr; /* AppleTalk link */
Eric Dumazet95ae6b22010-09-15 04:04:31 +00001256 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
Eric Dumazetfc766e4c2010-10-29 03:09:24 +00001257 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
Eric Dumazet198caec2010-10-24 21:32:05 +00001258 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 void *ax25_ptr; /* AX.25 specific data */
Johannes Berg704232c2007-04-23 12:20:05 -07001260 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
1261 assign before registering */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001263/*
Eric Dumazetcd135392010-09-16 02:58:13 +00001264 * Cache lines mostly used on receive path (including eth_type_trans())
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001265 */
Eric Dumazet4dc89132010-08-31 07:40:16 +00001266 unsigned long last_rx; /* Time of last Rx
1267 * This should not be set in
1268 * drivers, unless really needed,
 1269 * because the network stack (bonding)
 1270 * uses it if/when necessary, to
1271 * avoid dirtying this cache line.
1272 */
1273
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001274 /* Interface address info used in eth_type_trans() */
Jiri Pirkof001fde2009-05-05 02:48:28 +00001275 unsigned char *dev_addr; /* hw address (before bcast
1276 because most packets are
1277 unicast) */
1278
Tom Herbert0a9627f2010-03-16 08:03:29 +00001279
david decotignyccf5ff62011-11-16 12:15:10 +00001280#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +00001281 struct netdev_rx_queue *_rx;
1282
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001283 /* Number of RX queues allocated at register_netdev() time */
Tom Herbert0a9627f2010-03-16 08:03:29 +00001284 unsigned int num_rx_queues;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001285
1286 /* Number of RX queues currently active in device */
1287 unsigned int real_num_rx_queues;
Ben Hutchingsc4454772011-01-19 11:03:53 +00001288
Eric Dumazetdf334542010-03-24 19:13:54 +00001289#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00001290
stephen hemminger61391cd2010-11-15 06:38:12 +00001291 rx_handler_func_t __rcu *rx_handler;
1292 void __rcu *rx_handler_data;
David S. Millere8a04642008-07-17 00:34:19 -07001293
Eric Dumazet24824a02010-10-02 06:11:55 +00001294 struct netdev_queue __rcu *ingress_queue;
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001295 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast address */
1296
Eric Dumazetcd135392010-09-16 02:58:13 +00001297
1298/*
1299 * Cache lines mostly used on transmit path
1300 */
David S. Millere8a04642008-07-17 00:34:19 -07001301 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001302
1303 /* Number of TX queues allocated at alloc_netdev_mq() time */
David S. Millere8a04642008-07-17 00:34:19 -07001304 unsigned int num_tx_queues;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001305
1306 /* Number of TX queues currently active in device */
1307 unsigned int real_num_tx_queues;
1308
Patrick McHardyaf356af2009-09-04 06:41:18 +00001309 /* root qdisc from userspace point of view */
1310 struct Qdisc *qdisc;
1311
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 unsigned long tx_queue_len; /* Max frames per queue allowed */
David S. Millerc3f26a22008-07-31 16:58:50 -07001313 spinlock_t tx_global_lock;
Eric Dumazetcd135392010-09-16 02:58:13 +00001314
Tom Herbertbf264142010-11-26 08:36:09 +00001315#ifdef CONFIG_XPS
Eric Dumazeta4177862010-11-28 21:43:02 +00001316 struct xps_dev_maps __rcu *xps_maps;
Tom Herbertbf264142010-11-26 08:36:09 +00001317#endif
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001318#ifdef CONFIG_RFS_ACCEL
1319 /* CPU reverse-mapping for RX completion interrupts, indexed
1320 * by RX queue number. Assigned by driver. This must only be
1321 * set if the ndo_rx_flow_steer operation is defined. */
1322 struct cpu_rmap *rx_cpu_rmap;
1323#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001324
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001325 /* These may be needed for future network-power-down code. */
Eric Dumazet9d214932009-05-17 20:55:16 -07001326
1327 /*
1328 * trans_start here is expensive for high speed devices on SMP,
1329 * please use netdev_queue->trans_start instead.
1330 */
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001331 unsigned long trans_start; /* Time (in jiffies) of last Tx */
1332
1333 int watchdog_timeo; /* used by dev_watchdog() */
1334 struct timer_list watchdog_timer;
1335
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 /* Number of references to this device */
Eric Dumazet29b44332010-10-11 10:22:12 +00001337 int __percpu *pcpu_refcnt;
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001338
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 /* delayed register/unregister */
1340 struct list_head todo_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 /* device index hash chain */
1342 struct hlist_node index_hlist;
1343
Eric Dumazete014deb2009-11-17 05:59:21 +00001344 struct list_head link_watch_list;
Herbert Xu572a1032007-05-08 18:34:17 -07001345
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 /* register/unregister state machine */
1347 enum { NETREG_UNINITIALIZED=0,
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07001348 NETREG_REGISTERED, /* completed register_netdevice */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 NETREG_UNREGISTERING, /* called unregister_netdevice */
1350 NETREG_UNREGISTERED, /* completed unregister todo */
1351 NETREG_RELEASED, /* called free_netdev */
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001352 NETREG_DUMMY, /* dummy device for NAPI poll */
Eric Dumazet449f4542011-05-19 12:24:16 +00001353 } reg_state:8;
1354
 1355 bool dismantle; /* device is going to be freed */
Patrick McHardya2835762010-02-26 06:34:51 +00001356
1357 enum {
1358 RTNL_LINK_INITIALIZED,
1359 RTNL_LINK_INITIALIZING,
1360 } rtnl_link_state:16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001362 /* Called from unregister, can be used to call free_netdev */
1363 void (*destructor)(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365#ifdef CONFIG_NETPOLL
Cong Wang5fbee842013-01-22 21:29:39 +00001366 struct netpoll_info __rcu *npinfo;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367#endif
David S. Millereae792b2008-07-15 03:03:33 -07001368
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001369#ifdef CONFIG_NET_NS
Eric W. Biederman4a1c5372007-09-12 11:56:32 +02001370 /* Network namespace this network device is inside */
1371 struct net *nd_net;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001372#endif
Eric W. Biederman4a1c5372007-09-12 11:56:32 +02001373
David S. Miller49517042008-05-12 03:29:11 -07001374 /* mid-layer private */
Eric Dumazeta7855c72010-09-23 23:51:51 +00001375 union {
1376 void *ml_priv;
1377 struct pcpu_lstats __percpu *lstats; /* loopback stats */
Eric Dumazet290b8952010-09-27 00:33:35 +00001378 struct pcpu_tstats __percpu *tstats; /* tunnel stats */
Eric Dumazet6d81f412010-09-27 20:50:33 +00001379 struct pcpu_dstats __percpu *dstats; /* dummy stats */
Eric Dumazet26811282012-12-29 16:02:43 +00001380 struct pcpu_vstats __percpu *vstats; /* veth stats */
Eric Dumazeta7855c72010-09-23 23:51:51 +00001381 };
Patrick McHardyeca9eba2008-07-05 21:26:13 -07001382 /* GARP */
Eric Dumazet3cc77ec2010-10-24 21:32:36 +00001383 struct garp_port __rcu *garp_port;
David Wardfebf0182013-02-08 17:17:06 +00001384 /* MRP */
1385 struct mrp_port __rcu *mrp_port;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 /* class/net/name entry */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001388 struct device dev;
Eric W. Biederman0c509a62009-10-29 14:18:21 +00001389 /* space for optional device, statistics, and wireless sysfs groups */
1390 const struct attribute_group *sysfs_groups[4];
Patrick McHardy38f7b872007-06-13 12:03:51 -07001391
1392 /* rtnetlink link ops */
1393 const struct rtnl_link_ops *rtnl_link_ops;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001394
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001395 /* for setting kernel sock attribute on TCP connection setup */
1396#define GSO_MAX_SIZE 65536
1397 unsigned int gso_max_size;
Ben Hutchings30b678d2012-07-30 15:57:00 +00001398#define GSO_MAX_SEGS 65535
1399 u16 gso_max_segs;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001400
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08001401#ifdef CONFIG_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08001402 /* Data Center Bridging netlink ops */
Stephen Hemminger32953542009-10-05 06:01:03 +00001403 const struct dcbnl_rtnl_ops *dcbnl_ops;
Alexander Duyck2f90b862008-11-20 20:52:10 -08001404#endif
John Fastabend4f57c082011-01-17 08:06:04 +00001405 u8 num_tc;
1406 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1407 u8 prio_tc_map[TC_BITMASK + 1];
Alexander Duyck2f90b862008-11-20 20:52:10 -08001408
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001409#if IS_ENABLED(CONFIG_FCOE)
Yi Zou4d288d52009-02-27 14:06:59 -08001410 /* max exchange id for FCoE LRO by ddp */
1411 unsigned int fcoe_ddp_xid;
1412#endif
Neil Horman5bc14212011-11-22 05:10:51 +00001413#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1414 struct netprio_map __rcu *priomap;
1415#endif
Richard Cochranc1f19b52010-07-17 08:49:36 +00001416 /* phy device may attach itself for hardware timestamping */
1417 struct phy_device *phydev;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00001418
Eric Dumazet23d3b8b2012-09-05 01:02:56 +00001419 struct lock_class_key *qdisc_tx_busylock;
1420
Vlad Dogarucbda10f2011-01-13 23:38:30 +00001421 /* group the device belongs to */
1422 int group;
Eric Dumazet91364612012-06-11 06:36:13 +00001423
1424 struct pm_qos_request pm_qos_req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425};
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001426#define to_net_dev(d) container_of(d, struct net_device, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427
1428#define NETDEV_ALIGN 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
David S. Millere8a04642008-07-17 00:34:19 -07001430static inline
John Fastabend4f57c082011-01-17 08:06:04 +00001431int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1432{
1433 return dev->prio_tc_map[prio & TC_BITMASK];
1434}
1435
1436static inline
1437int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1438{
1439 if (tc >= dev->num_tc)
1440 return -EINVAL;
1441
1442 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1443 return 0;
1444}
1445
1446static inline
1447void netdev_reset_tc(struct net_device *dev)
1448{
1449 dev->num_tc = 0;
1450 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1451 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1452}
1453
1454static inline
1455int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1456{
1457 if (tc >= dev->num_tc)
1458 return -EINVAL;
1459
1460 dev->tc_to_txq[tc].count = count;
1461 dev->tc_to_txq[tc].offset = offset;
1462 return 0;
1463}
1464
1465static inline
1466int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1467{
1468 if (num_tc > TC_MAX_QUEUE)
1469 return -EINVAL;
1470
1471 dev->num_tc = num_tc;
1472 return 0;
1473}
1474
1475static inline
1476int netdev_get_num_tc(struct net_device *dev)
1477{
1478 return dev->num_tc;
1479}
1480
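/*
 * Illustrative sketch (editorial addition): combining the traffic-class
 * helpers above — 4 TX queues split into two TCs of two queues each, with
 * priorities 0-3 mapped to TC 0 and the rest to TC 1. Error handling is
 * elided; foo_example_* is a hypothetical name.
 */
static inline void foo_example_setup_tc(struct net_device *dev)
{
	u8 prio;

	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 2, 0);	/* TC 0: queues 0-1 */
	netdev_set_tc_queue(dev, 1, 2, 2);	/* TC 1: queues 2-3 */

	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
}
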
1481static inline
David S. Millere8a04642008-07-17 00:34:19 -07001482struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1483 unsigned int index)
1484{
1485 return &dev->_tx[index];
1486}
1487
1488static inline void netdev_for_each_tx_queue(struct net_device *dev,
1489 void (*f)(struct net_device *,
1490 struct netdev_queue *,
1491 void *),
1492 void *arg)
1493{
1494 unsigned int i;
1495
1496 for (i = 0; i < dev->num_tx_queues; i++)
1497 f(dev, &dev->_tx[i], arg);
1498}
1499
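/*
 * Illustrative sketch (editorial addition): the iterator above takes a
 * callback plus an opaque cookie; here the cookie is used to count the
 * device's TX queues. foo_example_* names are hypothetical.
 */
static inline void foo_example_count_one_queue(struct net_device *dev,
					       struct netdev_queue *txq,
					       void *arg)
{
	(*(unsigned int *)arg)++;	/* bump the count via the cookie */
}

static inline unsigned int foo_example_count_tx_queues(struct net_device *dev)
{
	unsigned int n = 0;

	netdev_for_each_tx_queue(dev, foo_example_count_one_queue, &n);
	return n;
}
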
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00001500extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1501 struct sk_buff *skb);
Alexander Duyck416186f2013-01-10 08:56:51 +00001502extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00001503
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001504/*
1505 * Net namespace inlines
1506 */
1507static inline
1508struct net *dev_net(const struct net_device *dev)
1509{
Eric Dumazetc2d9ba92010-06-01 06:51:19 +00001510 return read_pnet(&dev->nd_net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001511}
1512
1513static inline
Denis V. Lunevf5aa23f2008-03-26 00:48:17 -07001514void dev_net_set(struct net_device *dev, struct net *net)
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001515{
1516#ifdef CONFIG_NET_NS
Denis V. Lunevf3005d72008-04-16 02:02:18 -07001517 release_net(dev->nd_net);
1518 dev->nd_net = hold_net(net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001519#endif
1520}
1521
Lennert Buytenhekcf85d082008-10-07 13:45:02 +00001522static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1523{
1524#ifdef CONFIG_NET_DSA_TAG_DSA
1525 if (dev->dsa_ptr != NULL)
1526 return dsa_uses_dsa_tags(dev->dsa_ptr);
1527#endif
1528
1529 return 0;
1530}
1531
Lennert Buytenhek396138f2008-10-07 13:46:07 +00001532static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1533{
1534#ifdef CONFIG_NET_DSA_TAG_TRAILER
1535 if (dev->dsa_ptr != NULL)
1536 return dsa_uses_trailer_tags(dev->dsa_ptr);
1537#endif
1538
1539 return 0;
1540}
1541
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001542/**
1543 * netdev_priv - access network device private data
1544 * @dev: network device
1545 *
1546 * Get network device private data
1547 */
Patrick McHardy6472ce62007-06-13 12:03:21 -07001548static inline void *netdev_priv(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549{
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00001550 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551}
1552
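/*
 * Illustrative sketch (editorial addition): private driver state lives
 * directly after the aligned struct net_device, so a driver that allocated
 * its netdev with room for a private struct recovers it cheaply via
 * netdev_priv(). struct foo_example_priv is hypothetical.
 */
struct foo_example_priv {
	unsigned int rx_ring_size;
};

static inline struct foo_example_priv *foo_example_priv(struct net_device *dev)
{
	return netdev_priv(dev);
}
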
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553/* Set the sysfs physical device reference for the network logical device.
 1554 * If set prior to registration, a symlink will be created during initialization.
1555 */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001556#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557
Marcel Holtmann384912e2009-08-31 21:08:19 +00001558/* Set the sysfs device type for the network logical device to allow
 1559 * fine-grained identification of different network device types. For
 1560 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
1561 */
1562#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1563
Eric Dumazet82dc3c63c2013-03-05 15:57:22 +00001564/* Default NAPI poll() weight
 1565 * Device drivers are strongly advised not to use a bigger value
1566 */
1567#define NAPI_POLL_WEIGHT 64
1568
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07001569/**
1570 * netif_napi_add - initialize a napi context
1571 * @dev: network device
1572 * @napi: napi context
1573 * @poll: polling function
1574 * @weight: default weight
1575 *
1576 * netif_napi_add() must be used to initialize a napi context prior to calling
1577 * *any* of the other napi related functions.
1578 */
Herbert Xud565b0a2008-12-15 23:38:52 -08001579void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1580 int (*poll)(struct napi_struct *, int), int weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001581
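/*
 * Illustrative sketch (editorial addition): the shape of a NAPI poll
 * routine a driver would register through netif_napi_add() above.
 * foo_example_clean_rx() is a stub standing in for hardware-specific RX
 * processing; napi_complete() is assumed available from earlier in this
 * header.
 */
static inline int foo_example_clean_rx(struct napi_struct *napi, int budget)
{
	return 0;	/* stand-in: number of packets actually processed */
}

static inline int foo_example_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_example_clean_rx(napi, budget);

	if (work_done < budget)
		napi_complete(napi);	/* all done: leave polled mode */
	return work_done;
}

/* at probe: netif_napi_add(dev, napi, foo_example_poll, NAPI_POLL_WEIGHT); */
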
Alexander Duyckd8156532008-07-08 15:13:05 -07001582/**
1583 * netif_napi_del - remove a napi context
1584 * @napi: napi context
1585 *
1586 * netif_napi_del() removes a napi context from the network device napi list
1587 */
Herbert Xud565b0a2008-12-15 23:38:52 -08001588void netif_napi_del(struct napi_struct *napi);
1589
1590struct napi_gro_cb {
Herbert Xu78a478d2009-05-26 18:50:21 +00001591 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1592 void *frag0;
1593
Herbert Xu74895942009-05-26 18:50:27 +00001594 /* Length of frag0. */
1595 unsigned int frag0_len;
1596
Herbert Xu86911732009-01-29 14:19:50 +00001597 /* This indicates where we are processing relative to skb->data. */
1598 int data_offset;
1599
Herbert Xud565b0a2008-12-15 23:38:52 -08001600 /* This is non-zero if the packet cannot be merged with the new skb. */
1601 int flush;
1602
1603 /* Number of segments aggregated. */
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001604 u16 count;
1605
1606 /* This is non-zero if the packet may be of the same flow. */
1607 u8 same_flow;
Herbert Xu5d38a072009-01-04 16:13:40 -08001608
1609 /* Free the skb? */
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001610 u8 free;
Eric Dumazetd7e88832012-04-30 08:10:34 +00001611#define NAPI_GRO_FREE 1
1612#define NAPI_GRO_FREE_STOLEN_HEAD 2
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001613
1614 /* jiffies when first packet was created/queued */
1615 unsigned long age;
Eric Dumazet86347242012-10-08 21:38:50 +02001616
1617 /* Used in ipv6_gro_receive() */
1618 int proto;
Eric Dumazetc3c7c252012-12-06 13:54:59 +00001619
1620 /* used in skb_gro_receive() slow path */
1621 struct sk_buff *last;
Herbert Xud565b0a2008-12-15 23:38:52 -08001622};
1623
1624#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
Alexander Duyckd8156532008-07-08 15:13:05 -07001625
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626struct packet_type {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001627 __be16 type; /* This is really htons(ether_type). */
1628 struct net_device *dev; /* NULL is wildcarded here */
1629 int (*func) (struct sk_buff *,
1630 struct net_device *,
1631 struct packet_type *,
1632 struct net_device *);
Eric Leblondc0de08d2012-08-16 22:02:58 +00001633 bool (*id_match)(struct packet_type *ptype,
1634 struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 void *af_packet_priv;
1636 struct list_head list;
1637};
1638
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00001639struct offload_callbacks {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1641 netdev_features_t features);
1642 int (*gso_send_check)(struct sk_buff *skb);
1643 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1644 struct sk_buff *skb);
1645 int (*gro_complete)(struct sk_buff *skb);
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00001646};
1647
1648struct packet_offload {
1649 __be16 type; /* This is really htons(ether_type). */
1650 struct offload_callbacks callbacks;
1651 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652};
1653
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654#include <linux/notifier.h>
1655
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001656/* netdevice notifier chain. Please remember to update the rtnetlink
1657 * notification exclusion list in rtnetlink_event() when adding new
1658 * types.
1659 */
1660#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
1661#define NETDEV_DOWN 0x0002
1662#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
1663 detected a hardware crash and restarted
1664 - we can use this eg to kick tcp sessions
1665 once done */
1666#define NETDEV_CHANGE 0x0004 /* Notify device state change */
1667#define NETDEV_REGISTER 0x0005
1668#define NETDEV_UNREGISTER 0x0006
1669#define NETDEV_CHANGEMTU 0x0007
1670#define NETDEV_CHANGEADDR 0x0008
1671#define NETDEV_GOING_DOWN 0x0009
1672#define NETDEV_CHANGENAME 0x000A
1673#define NETDEV_FEAT_CHANGE 0x000B
1674#define NETDEV_BONDING_FAILOVER 0x000C
1675#define NETDEV_PRE_UP 0x000D
1676#define NETDEV_PRE_TYPE_CHANGE 0x000E
1677#define NETDEV_POST_TYPE_CHANGE 0x000F
1678#define NETDEV_POST_INIT 0x0010
Eric Dumazet0115e8e2012-08-22 17:19:46 +00001679#define NETDEV_UNREGISTER_FINAL 0x0011
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001680#define NETDEV_RELEASE 0x0012
1681#define NETDEV_NOTIFY_PEERS 0x0013
1682#define NETDEV_JOIN 0x0014
Jiri Pirko42e52bf2013-05-25 04:12:10 +00001683#define NETDEV_CHANGEUPPER 0x0015
Jiri Pirko4aa5dee2013-07-20 12:13:53 +02001684#define NETDEV_RESEND_IGMP 0x0016
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001685
1686extern int register_netdevice_notifier(struct notifier_block *nb);
1687extern int unregister_netdevice_notifier(struct notifier_block *nb);
Jiri Pirko351638e2013-05-28 01:30:21 +00001688
1689struct netdev_notifier_info {
1690 struct net_device *dev;
1691};
1692
Jiri Pirkobe9efd32013-05-28 01:30:22 +00001693struct netdev_notifier_change_info {
1694 struct netdev_notifier_info info; /* must be first */
1695 unsigned int flags_changed;
1696};
1697
Cong Wang75538c22013-05-29 11:30:50 +08001698static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
1699 struct net_device *dev)
1700{
1701 info->dev = dev;
1702}
1703
Jiri Pirko351638e2013-05-28 01:30:21 +00001704static inline struct net_device *
1705netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
1706{
1707 return info->dev;
1708}
1709
1710extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
1711 struct netdev_notifier_info *info);
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001712extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
1713
1714
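/*
 * Illustrative sketch (editorial addition): a netdevice notifier callback
 * using the info helpers above. NETDEV_CHANGE events carry the extended
 * netdev_notifier_change_info; registration via
 * register_netdevice_notifier() is elided. foo_example_* is hypothetical.
 */
static inline int foo_example_netdev_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGE) {
		struct netdev_notifier_change_info *ci = ptr;

		(void)ci->flags_changed;	/* which IFF_* bits flipped */
	}
	(void)dev;
	return NOTIFY_DONE;
}
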
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715extern rwlock_t dev_base_lock; /* Device list lock */
1716
Eric W. Biederman881d9662007-09-17 11:56:21 -07001717#define for_each_netdev(net, d) \
1718 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
Eric W. Biedermandcbccbd42009-11-29 22:25:26 +00001719#define for_each_netdev_reverse(net, d) \
1720 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08001721#define for_each_netdev_rcu(net, d) \
1722 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
Eric W. Biederman881d9662007-09-17 11:56:21 -07001723#define for_each_netdev_safe(net, d, n) \
1724 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1725#define for_each_netdev_continue(net, d) \
1726 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
stephen hemminger254245d2009-11-10 07:54:47 +00001727#define for_each_netdev_continue_rcu(net, d) \
1728 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
nikolay@redhat.com8a7fbfa2013-03-12 02:49:01 +00001729#define for_each_netdev_in_bond_rcu(bond, slave) \
1730 for_each_netdev_rcu(&init_net, slave) \
1731 if (netdev_master_upper_dev_get_rcu(slave) == bond)
Pavel Emelianov7562f872007-05-03 15:13:45 -07001732#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
1733
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001734static inline struct net_device *next_net_device(struct net_device *dev)
1735{
1736 struct list_head *lh;
1737 struct net *net;
Pavel Emelianov7562f872007-05-03 15:13:45 -07001738
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001739 net = dev_net(dev);
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001740 lh = dev->dev_list.next;
1741 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1742}
1743
Eric Dumazetce81b762009-11-11 17:34:30 +00001744static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1745{
1746 struct list_head *lh;
1747 struct net *net;
1748
1749 net = dev_net(dev);
Eric Dumazetccf43432011-01-26 18:08:02 +00001750 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
Eric Dumazetce81b762009-11-11 17:34:30 +00001751 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1752}
1753
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001754static inline struct net_device *first_net_device(struct net *net)
1755{
1756 return list_empty(&net->dev_base_head) ? NULL :
1757 net_device_entry(net->dev_base_head.next);
1758}
Pavel Emelianov7562f872007-05-03 15:13:45 -07001759
Eric Dumazetccf43432011-01-26 18:08:02 +00001760static inline struct net_device *first_net_device_rcu(struct net *net)
1761{
1762 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
1763
1764 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1765}
1766
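/*
 * Illustrative sketch (editorial addition): walking every device in a
 * namespace under RCU protection with the iterator above.
 */
static inline unsigned int foo_example_count_netdevs(struct net *net)
{
	struct net_device *dev;
	unsigned int n = 0;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		n++;	/* dev is only valid inside the RCU read section */
	rcu_read_unlock();
	return n;
}
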
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767extern int netdev_boot_setup_check(struct net_device *dev);
1768extern unsigned long netdev_boot_base(const char *prefix, int unit);
Eric Dumazet941666c2010-12-05 01:23:53 +00001769extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1770 const char *hwaddr);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001771extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1772extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773extern void dev_add_pack(struct packet_type *pt);
1774extern void dev_remove_pack(struct packet_type *pt);
1775extern void __dev_remove_pack(struct packet_type *pt);
Vlad Yasevich62532da2012-11-15 08:49:10 +00001776extern void dev_add_offload(struct packet_offload *po);
1777extern void dev_remove_offload(struct packet_offload *po);
1778extern void __dev_remove_offload(struct packet_offload *po);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
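/*
 * Illustrative sketch (editorial addition): registering a protocol handler
 * with dev_add_pack() for an experimental ethertype. The receive stub just
 * consumes the packet; a real user pairs this with dev_remove_pack() on
 * teardown. foo_example_* names are hypothetical.
 */
static inline int foo_example_rcv(struct sk_buff *skb, struct net_device *dev,
				  struct packet_type *pt,
				  struct net_device *orig_dev)
{
	kfree_skb(skb);		/* stand-in: consume and drop */
	return NET_RX_SUCCESS;
}

static struct packet_type foo_example_ptype = {
	.type = cpu_to_be16(0x88b5),	/* IEEE local-experimental ethertype */
	.func = foo_example_rcv,
};
/* dev_add_pack(&foo_example_ptype); ... dev_remove_pack(&foo_example_ptype); */
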
Eric Dumazetbb69ae02010-06-07 11:42:13 +00001780extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1781 unsigned short mask);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001782extern struct net_device *dev_get_by_name(struct net *net, const char *name);
Eric Dumazet72c95282009-10-30 07:11:27 +00001783extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001784extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785extern int dev_alloc_name(struct net_device *dev, const char *name);
1786extern int dev_open(struct net_device *dev);
1787extern int dev_close(struct net_device *dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001788extern void dev_disable_lro(struct net_device *dev);
Michel Machado95603e22012-06-12 10:16:35 +00001789extern int dev_loopback_xmit(struct sk_buff *newskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790extern int dev_queue_xmit(struct sk_buff *skb);
1791extern int register_netdevice(struct net_device *dev);
Eric Dumazet44a08732009-10-27 07:03:04 +00001792extern void unregister_netdevice_queue(struct net_device *dev,
1793 struct list_head *head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00001794extern void unregister_netdevice_many(struct list_head *head);
Eric Dumazet44a08732009-10-27 07:03:04 +00001795static inline void unregister_netdevice(struct net_device *dev)
1796{
1797 unregister_netdevice_queue(dev, NULL);
1798}
1799
Eric Dumazet29b44332010-10-11 10:22:12 +00001800extern int netdev_refcnt_read(const struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801extern void free_netdev(struct net_device *dev);
1802extern void synchronize_net(void);
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001803extern int init_dummy_netdev(struct net_device *dev);
1804
Eric W. Biederman881d9662007-09-17 11:56:21 -07001805extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1806extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00001807extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
Nicolas Schichan5dbe7c12013-06-26 17:23:42 +02001808extern int netdev_get_name(struct net *net, char *name, int ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809extern int dev_restart(struct net_device *dev);
1810#ifdef CONFIG_NETPOLL_TRAP
1811extern int netpoll_trap(void);
1812#endif
Herbert Xu86911732009-01-29 14:19:50 +00001813extern int skb_gro_receive(struct sk_buff **head,
1814 struct sk_buff *skb);
1815
1816static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1817{
1818 return NAPI_GRO_CB(skb)->data_offset;
1819}
1820
1821static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1822{
1823 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1824}
1825
1826static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1827{
1828 NAPI_GRO_CB(skb)->data_offset += len;
1829}
1830
Herbert Xua5b1cf22009-05-26 18:50:28 +00001831static inline void *skb_gro_header_fast(struct sk_buff *skb,
1832 unsigned int offset)
Herbert Xu86911732009-01-29 14:19:50 +00001833{
Herbert Xu78a478d2009-05-26 18:50:21 +00001834 return NAPI_GRO_CB(skb)->frag0 + offset;
Herbert Xu86911732009-01-29 14:19:50 +00001835}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836
Herbert Xua5b1cf22009-05-26 18:50:28 +00001837static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1838{
1839 return NAPI_GRO_CB(skb)->frag0_len < hlen;
1840}
1841
1842static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1843 unsigned int offset)
1844{
Herbert Xu17dd7592011-07-27 06:16:28 -07001845 if (!pskb_may_pull(skb, hlen))
1846 return NULL;
1847
Herbert Xua5b1cf22009-05-26 18:50:28 +00001848 NAPI_GRO_CB(skb)->frag0 = NULL;
1849 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu17dd7592011-07-27 06:16:28 -07001850 return skb->data + offset;
Herbert Xua5b1cf22009-05-26 18:50:28 +00001851}
1852
Herbert Xuaa4b9f52009-02-08 18:00:37 +00001853static inline void *skb_gro_mac_header(struct sk_buff *skb)
1854{
Herbert Xu78d3fd02009-05-26 18:50:23 +00001855 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
Herbert Xuaa4b9f52009-02-08 18:00:37 +00001856}
1857
Herbert Xu36e7b1b2009-04-27 05:44:45 -07001858static inline void *skb_gro_network_header(struct sk_buff *skb)
1859{
Herbert Xu78d3fd02009-05-26 18:50:23 +00001860 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1861 skb_network_offset(skb);
Herbert Xu36e7b1b2009-04-27 05:44:45 -07001862}
1863
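/*
 * Illustrative sketch (editorial addition): the canonical pattern for a
 * gro_receive callback pulling a header of @hlen bytes — try the frag0
 * fast path first, then fall back to the linearizing slow path, which may
 * return NULL.
 */
static inline void *foo_example_gro_pull_hdr(struct sk_buff *skb,
					     unsigned int hlen)
{
	unsigned int off = skb_gro_offset(skb);
	void *hdr = skb_gro_header_fast(skb, off);

	if (skb_gro_header_hard(skb, off + hlen))
		hdr = skb_gro_header_slow(skb, off + hlen, off);
	return hdr;
}
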
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001864static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1865 unsigned short type,
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001866 const void *daddr, const void *saddr,
Eric Dumazet95c96172012-04-15 05:58:06 +00001867 unsigned int len)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001868{
Ursula Braunf1ecfd52007-10-22 16:16:14 +02001869 if (!dev->header_ops || !dev->header_ops->create)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001870 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001871
1872 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001873}
1874
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001875static inline int dev_parse_header(const struct sk_buff *skb,
1876 unsigned char *haddr)
1877{
1878 const struct net_device *dev = skb->dev;
1879
Patrick McHardy1b833362007-10-18 05:09:28 -07001880 if (!dev->header_ops || !dev->header_ops->parse)
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001881 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001882 return dev->header_ops->parse(skb, haddr);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001883}
1884
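/*
 * Illustrative sketch (editorial addition): building a link-layer header
 * on an outgoing skb via the header_ops wrapper above, falling back to the
 * device broadcast address when no destination is known.
 */
static inline int foo_example_build_header(struct sk_buff *skb,
					   struct net_device *dev,
					   const void *dest)
{
	if (!dest)
		dest = dev->broadcast;
	return dev_hard_header(skb, dev, ETH_P_IP, dest, dev->dev_addr,
			       skb->len);
}
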
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1886extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1887static inline int unregister_gifconf(unsigned int family)
1888{
1889 return register_gifconf(family, NULL);
1890}
1891
Willem de Bruijn99bbc702013-05-20 04:02:32 +00001892#ifdef CONFIG_NET_FLOW_LIMIT
Willem de Bruijn5f121b92013-06-13 15:29:38 -04001893#define FLOW_LIMIT_HISTORY (1 << 7) /* must be a power of 2 and must not overflow the buckets */
Willem de Bruijn99bbc702013-05-20 04:02:32 +00001894struct sd_flow_limit {
1895 u64 count;
1896 unsigned int num_buckets;
1897 unsigned int history_head;
1898 u16 history[FLOW_LIMIT_HISTORY];
1899 u8 buckets[];
1900};
1901
1902extern int netdev_flow_limit_table_len;
1903#endif /* CONFIG_NET_FLOW_LIMIT */
1904
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905/*
Eric Dumazet88751272010-04-19 05:07:33 +00001906 * Incoming packets are placed on per-cpu queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 */
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001908struct softnet_data {
David S. Miller37437bb2008-07-16 02:15:04 -07001909 struct Qdisc *output_queue;
Changli Gaoa9cbd582010-04-26 23:06:24 +00001910 struct Qdisc **output_queue_tailp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 struct list_head poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 struct sk_buff *completion_queue;
Changli Gao6e7676c2010-04-27 15:07:33 -07001913 struct sk_buff_head process_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914
Changli Gaodee42872010-05-02 05:42:16 +00001915 /* stats */
David S. Millercd7b5392010-05-02 22:27:59 -07001916 unsigned int processed;
1917 unsigned int time_squeeze;
1918 unsigned int cpu_collision;
1919 unsigned int received_rps;
Changli Gaodee42872010-05-02 05:42:16 +00001920
Changli Gaofd793d82010-04-15 00:16:59 -07001921#ifdef CONFIG_RPS
Eric Dumazet88751272010-04-19 05:07:33 +00001922 struct softnet_data *rps_ipi_list;
1923
1924 /* Elements below can be accessed between CPUs for RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00001925 struct call_single_data csd ____cacheline_aligned_in_smp;
Eric Dumazet88751272010-04-19 05:07:33 +00001926 struct softnet_data *rps_ipi_next;
1927 unsigned int cpu;
Tom Herbertfec5e652010-04-16 16:01:27 -07001928 unsigned int input_queue_head;
Tom Herbert76cc8b12010-05-20 18:37:59 +00001929 unsigned int input_queue_tail;
Tom Herbert1e94d722010-03-18 17:45:44 -07001930#endif
Eric Dumazet95c96172012-04-15 05:58:06 +00001931 unsigned int dropped;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001932 struct sk_buff_head input_pkt_queue;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001933 struct napi_struct backlog;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00001934
1935#ifdef CONFIG_NET_FLOW_LIMIT
Willem de Bruijn5f121b92013-06-13 15:29:38 -04001936 struct sd_flow_limit __rcu *flow_limit;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00001937#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938};
1939
Tom Herbert76cc8b12010-05-20 18:37:59 +00001940static inline void input_queue_head_incr(struct softnet_data *sd)
Tom Herbertfec5e652010-04-16 16:01:27 -07001941{
1942#ifdef CONFIG_RPS
Tom Herbert76cc8b12010-05-20 18:37:59 +00001943 sd->input_queue_head++;
1944#endif
1945}
1946
1947static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1948 unsigned int *qtail)
1949{
1950#ifdef CONFIG_RPS
1951 *qtail = ++sd->input_queue_tail;
Tom Herbertfec5e652010-04-16 16:01:27 -07001952#endif
1953}
1954
Tom Herbert0a9627f2010-03-16 08:03:29 +00001955DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956
David S. Miller37437bb2008-07-16 02:15:04 -07001957extern void __netif_schedule(struct Qdisc *q);
David S. Miller86d804e2008-07-08 23:11:25 -07001958
1959static inline void netif_schedule_queue(struct netdev_queue *txq)
1960{
Tom Herbert734664982011-11-28 16:32:44 +00001961 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
David S. Miller37437bb2008-07-16 02:15:04 -07001962 __netif_schedule(txq->qdisc);
David S. Miller86d804e2008-07-08 23:11:25 -07001963}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001965static inline void netif_tx_schedule_all(struct net_device *dev)
1966{
1967 unsigned int i;
1968
1969 for (i = 0; i < dev->num_tx_queues; i++)
1970 netif_schedule_queue(netdev_get_tx_queue(dev, i));
1971}
1972
Dave Jonesd29f7492008-07-22 14:09:06 -07001973static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1974{
Tom Herbert734664982011-11-28 16:32:44 +00001975 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07001976}
1977
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001978/**
1979 * netif_start_queue - allow transmit
1980 * @dev: network device
1981 *
1982 * Allow upper layers to call the device hard_start_xmit routine.
1983 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984static inline void netif_start_queue(struct net_device *dev)
1985{
David S. Millere8a04642008-07-17 00:34:19 -07001986 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987}
1988
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001989static inline void netif_tx_start_all_queues(struct net_device *dev)
1990{
1991 unsigned int i;
1992
1993 for (i = 0; i < dev->num_tx_queues; i++) {
1994 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1995 netif_tx_start_queue(txq);
1996 }
1997}
1998
David S. Miller79d16382008-07-08 23:14:46 -07001999static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000{
2001#ifdef CONFIG_NETPOLL_TRAP
Sergei Shtylyov5f286e12007-04-28 20:57:37 -07002002 if (netpoll_trap()) {
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002003 netif_tx_start_queue(dev_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 return;
Sergei Shtylyov5f286e12007-04-28 20:57:37 -07002005 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006#endif
Tom Herbert734664982011-11-28 16:32:44 +00002007 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
David S. Miller37437bb2008-07-16 02:15:04 -07002008 __netif_schedule(dev_queue->qdisc);
David S. Miller79d16382008-07-08 23:14:46 -07002009}
2010
Dave Jonesd29f7492008-07-22 14:09:06 -07002011/**
2012 * netif_wake_queue - restart transmit
2013 * @dev: network device
2014 *
2015 * Allow upper layers to call the device hard_start_xmit routine.
2016 * Used for flow control when transmit resources are available.
2017 */
David S. Miller79d16382008-07-08 23:14:46 -07002018static inline void netif_wake_queue(struct net_device *dev)
2019{
David S. Millere8a04642008-07-17 00:34:19 -07002020 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021}
2022
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002023static inline void netif_tx_wake_all_queues(struct net_device *dev)
2024{
2025 unsigned int i;
2026
2027 for (i = 0; i < dev->num_tx_queues; i++) {
2028 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2029 netif_tx_wake_queue(txq);
2030 }
2031}
2032
Dave Jonesd29f7492008-07-22 14:09:06 -07002033static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2034{
Guillaume Chazarain18543a62010-11-06 06:39:32 +00002035 if (WARN_ON(!dev_queue)) {
Joe Perches256ee432011-03-01 07:06:12 +00002036 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
Guillaume Chazarain18543a62010-11-06 06:39:32 +00002037 return;
2038 }
Tom Herbert734664982011-11-28 16:32:44 +00002039 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07002040}
2041
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002042/**
 2043 * netif_stop_queue - stop the transmit queue
2044 * @dev: network device
2045 *
2046 * Stop upper layers calling the device hard_start_xmit routine.
2047 * Used for flow control when transmit resources are unavailable.
2048 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049static inline void netif_stop_queue(struct net_device *dev)
2050{
David S. Millere8a04642008-07-17 00:34:19 -07002051 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052}
2053
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002054static inline void netif_tx_stop_all_queues(struct net_device *dev)
2055{
2056 unsigned int i;
2057
2058 for (i = 0; i < dev->num_tx_queues; i++) {
2059 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2060 netif_tx_stop_queue(txq);
2061 }
2062}
2063
David S. Miller4d295152012-03-07 21:02:35 -05002064static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
Dave Jonesd29f7492008-07-22 14:09:06 -07002065{
Tom Herbert734664982011-11-28 16:32:44 +00002066 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07002067}
2068
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002069/**
 2070 * netif_queue_stopped - test if transmit queue is flow-blocked
2071 * @dev: network device
2072 *
2073 * Test if transmit queue on device is currently unable to send.
2074 */
David S. Miller4d295152012-03-07 21:02:35 -05002075static inline bool netif_queue_stopped(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076{
David S. Millere8a04642008-07-17 00:34:19 -07002077 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078}
2079
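/*
 * Illustrative sketch (editorial addition): the classic flow-control
 * pattern using the helpers above — stop the queue from start_xmit when
 * the TX ring fills, wake it from the completion handler once descriptors
 * are reclaimed. Ring accounting is assumed; foo_example_* names are
 * hypothetical.
 */
static inline void foo_example_tx_maybe_stop(struct net_device *dev,
					     unsigned int free_descs,
					     unsigned int needed)
{
	if (free_descs < needed)
		netif_stop_queue(dev);	/* back-pressure the stack */
}

static inline void foo_example_tx_complete(struct net_device *dev,
					   unsigned int free_descs,
					   unsigned int wake_thresh)
{
	if (netif_queue_stopped(dev) && free_descs >= wake_thresh)
		netif_wake_queue(dev);	/* resume transmission */
}
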
David S. Miller4d295152012-03-07 21:02:35 -05002080static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
David S. Millerc3f26a22008-07-31 16:58:50 -07002081{
Tom Herbert734664982011-11-28 16:32:44 +00002082 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2083}
2084
David S. Miller4d295152012-03-07 21:02:35 -05002085static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
Tom Herbert734664982011-11-28 16:32:44 +00002086{
2087 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2088}
2089
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002090static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2091 unsigned int bytes)
2092{
Tom Herbert114cf582011-11-28 16:33:09 +00002093#ifdef CONFIG_BQL
2094 dql_queued(&dev_queue->dql, bytes);
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00002095
2096 if (likely(dql_avail(&dev_queue->dql) >= 0))
2097 return;
2098
2099 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2100
2101 /*
2102 * The XOFF flag must be set before checking the dql_avail below,
2103 * because in netdev_tx_completed_queue we update the dql_completed
2104 * before checking the XOFF flag.
2105 */
2106 smp_mb();
2107
2108 /* check again in case another CPU has just made room avail */
2109 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2110 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
Tom Herbert114cf582011-11-28 16:33:09 +00002111#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002112}
2113
Florian Fainelli0042d0c2013-09-06 16:58:00 +01002114/**
2115 * netdev_sent_queue - report the number of bytes queued to hardware
2116 * @dev: network device
2117 * @bytes: number of bytes queued to the hardware device queue
2118 *
2119 * Report the number of bytes queued for sending/completion to the network
 2120 * device hardware queue. @bytes should be a good approximation and must
 2121 * exactly match the @bytes later reported via netdev_completed_queue().
2122 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002123static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2124{
2125 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2126}
2127
2128static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
Eric Dumazet95c96172012-04-15 05:58:06 +00002129 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002130{
Tom Herbert114cf582011-11-28 16:33:09 +00002131#ifdef CONFIG_BQL
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00002132 if (unlikely(!bytes))
2133 return;
2134
2135 dql_completed(&dev_queue->dql, bytes);
2136
2137 /*
 2138 * Without the memory barrier there is a small possibility that
2139 * netdev_tx_sent_queue will miss the update and cause the queue to
2140 * be stopped forever
2141 */
2142 smp_mb();
2143
2144 if (dql_avail(&dev_queue->dql) < 0)
2145 return;
2146
2147 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
2148 netif_schedule_queue(dev_queue);
Tom Herbert114cf582011-11-28 16:33:09 +00002149#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002150}
2151
Florian Fainelli0042d0c2013-09-06 16:58:00 +01002152/**
2153 * netdev_completed_queue - report bytes and packets completed by device
2154 * @dev: network device
2155 * @pkts: actual number of packets sent over the medium
2156 * @bytes: actual number of bytes sent over the medium
2157 *
2158 * Report the number of bytes and packets transmitted by the network device
2159 * hardware queue over the physical medium, @bytes must exactly match the
2160 * @bytes amount passed to netdev_sent_queue()
2161 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002162static inline void netdev_completed_queue(struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +00002163 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002164{
2165 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
2166}
2167
2168static inline void netdev_tx_reset_queue(struct netdev_queue *q)
2169{
Tom Herbert114cf582011-11-28 16:33:09 +00002170#ifdef CONFIG_BQL
Alexander Duyck5c490352012-02-07 02:29:01 +00002171 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
Tom Herbert114cf582011-11-28 16:33:09 +00002172 dql_reset(&q->dql);
2173#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002174}
2175
Florian Fainelli0042d0c2013-09-06 16:58:00 +01002176/**
2177 * netdev_reset_queue - reset the packets and bytes count of a network device
2178 * @dev_queue: network device
2179 *
2180 * Reset the bytes and packet count of a network device and clear the
2181 * software flow control OFF bit for this network device
2182 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002183static inline void netdev_reset_queue(struct net_device *dev_queue)
2184{
2185 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
David S. Millerc3f26a22008-07-31 16:58:50 -07002186}
2187
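/*
 * Illustrative sketch (editorial addition): byte queue limits in a driver
 * TX path — report bytes when descriptors are posted, complete them from
 * the TX interrupt/NAPI cleanup so BQL can size the in-flight window, and
 * call netdev_reset_queue() whenever the ring is reset. A single-queue
 * device is assumed for brevity.
 */
static inline void foo_example_bql_xmit(struct net_device *dev,
					unsigned int skb_len)
{
	netdev_sent_queue(dev, skb_len);	/* at descriptor post time */
}

static inline void foo_example_bql_clean(struct net_device *dev,
					 unsigned int pkts,
					 unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes); /* from TX completion */
}
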
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002188/**
2189 * netif_running - test if up
2190 * @dev: network device
2191 *
2192 * Test if the device has been brought up.
2193 */
David S. Miller4d295152012-03-07 21:02:35 -05002194static inline bool netif_running(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195{
2196 return test_bit(__LINK_STATE_START, &dev->state);
2197}
2198
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002199/*
 2200 * Routines to manage the subqueues on a device. We only need start,
 2201 * stop, and a check if it's stopped. All other device management is
 2202 * done at the overall netdevice level.
 2203 * Also test whether the device is multiqueue.
2204 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002205
2206/**
2207 * netif_start_subqueue - allow sending packets on subqueue
2208 * @dev: network device
2209 * @queue_index: sub queue index
2210 *
2211 * Start individual transmit queue of a device with multiple transmit queues.
2212 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002213static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2214{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002215 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002216
2217 netif_tx_start_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002218}
2219
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002220/**
2221 * netif_stop_subqueue - stop sending packets on subqueue
2222 * @dev: network device
2223 * @queue_index: sub queue index
2224 *
2225 * Stop individual transmit queue of a device with multiple transmit queues.
2226 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002227static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2228{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002229 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002230#ifdef CONFIG_NETPOLL_TRAP
2231 if (netpoll_trap())
2232 return;
2233#endif
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002234 netif_tx_stop_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002235}
2236
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002237/**
2238 * netif_subqueue_stopped - test status of subqueue
2239 * @dev: network device
2240 * @queue_index: sub queue index
2241 *
2242 * Check individual transmit queue of a device with multiple transmit queues.
2243 */
David S. Miller4d295152012-03-07 21:02:35 -05002244static inline bool __netif_subqueue_stopped(const struct net_device *dev,
2245 u16 queue_index)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002246{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002247 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002248
2249 return netif_tx_queue_stopped(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002250}
2251
David S. Miller4d295152012-03-07 21:02:35 -05002252static inline bool netif_subqueue_stopped(const struct net_device *dev,
2253 struct sk_buff *skb)
Pavel Emelyanov668f8952007-10-21 17:01:56 -07002254{
2255 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2256}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002257
2258/**
2259 * netif_wake_subqueue - allow sending packets on subqueue
2260 * @dev: network device
2261 * @queue_index: sub queue index
2262 *
2263 * Resume individual transmit queue of a device with multiple transmit queues.
2264 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002265static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2266{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002267 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002268#ifdef CONFIG_NETPOLL_TRAP
2269 if (netpoll_trap())
2270 return;
2271#endif
Tom Herbert734664982011-11-28 16:32:44 +00002272 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
David S. Miller37437bb2008-07-16 02:15:04 -07002273 __netif_schedule(txq->qdisc);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002274}
2275
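/*
 * Illustrative sketch (editorial addition): per-queue flow control for a
 * multiqueue driver with the subqueue helpers above, keyed off the skb's
 * queue mapping. Ring-full state is assumed to come from driver
 * accounting; foo_example_* is hypothetical.
 */
static inline void foo_example_mq_stop_wake(struct net_device *dev,
					    u16 queue, bool ring_full)
{
	if (ring_full)
		netif_stop_subqueue(dev, queue);
	else if (__netif_subqueue_stopped(dev, queue))
		netif_wake_subqueue(dev, queue);
}
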
#ifdef CONFIG_XPS
extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
                               u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
                                      struct cpumask *mask,
                                      u16 index)
{
        return 0;
}
#endif

/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
                              const struct sk_buff *skb)
{
        return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

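/* Example (illustrative sketch only): a hypothetical ndo_select_queue-style
 * helper that falls back to skb_tx_hash(), so the returned index is always
 * below dev->real_num_tx_queues.  "foo_select_queue" is an assumed name.
 */
#if 0	/* example only */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        /* Priority traffic could be steered to a dedicated queue here;
         * everything else is spread across the real queues by flow hash.
         */
        return skb_tx_hash(dev, skb);
}
#endif
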
/**
 * netif_is_multiqueue - test if device has multiple transmit queues
 * @dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
        return dev->num_tx_queues > 1;
}

extern int netif_set_real_num_tx_queues(struct net_device *dev,
                                        unsigned int txq);

#ifdef CONFIG_RPS
extern int netif_set_real_num_rx_queues(struct net_device *dev,
                                        unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
                                               unsigned int rxq)
{
        return 0;
}
#endif

static inline int netif_copy_real_num_queues(struct net_device *to_dev,
                                             const struct net_device *from_dev)
{
        int err;

        err = netif_set_real_num_tx_queues(to_dev,
                                           from_dev->real_num_tx_queues);
        if (err)
                return err;
#ifdef CONFIG_RPS
        return netif_set_real_num_rx_queues(to_dev,
                                            from_dev->real_num_rx_queues);
#else
        return 0;
#endif
}

#define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
extern int netif_get_num_default_rss_queues(void);

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

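/* Example (illustrative sketch only): picking the right free variant.  A
 * TX-completion hard-IRQ handler may use dev_kfree_skb_irq(); code that can
 * run in either context should use dev_kfree_skb_any().  The "foo" handler
 * and its reclaim helper are assumptions for illustration.
 */
#if 0	/* example only */
static irqreturn_t foo_tx_irq(int irq, void *data)
{
        struct sk_buff *skb = foo_reclaim_next(data);	/* hypothetical */

        if (skb)
                dev_kfree_skb_irq(skb);	/* safe in hard IRQ context */
        return IRQ_HANDLED;
}
#endif
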
extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
extern int netif_receive_skb(struct sk_buff *skb);
extern gro_result_t napi_gro_receive(struct napi_struct *napi,
                                     struct sk_buff *skb);
extern void napi_gro_flush(struct napi_struct *napi, bool flush_old);
extern struct sk_buff *napi_get_frags(struct napi_struct *napi);
extern gro_result_t napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
        kfree_skb(napi->skb);
        napi->skb = NULL;
}

extern int netdev_rx_handler_register(struct net_device *dev,
                                      rx_handler_func_t *rx_handler,
                                      void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);

extern bool dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned int dev_get_flags(const struct net_device *);
extern int __dev_change_flags(struct net_device *, unsigned int flags);
extern int dev_change_flags(struct net_device *, unsigned int);
extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int dev_change_name(struct net_device *, const char *);
extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
                                    struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern void dev_set_group(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
                               struct sockaddr *);
extern int dev_change_carrier(struct net_device *,
                              bool new_carrier);
extern int dev_get_phys_port_id(struct net_device *dev,
                                struct netdev_phys_port_id *ppid);
extern int dev_hard_start_xmit(struct sk_buff *skb,
                               struct net_device *dev,
                               struct netdev_queue *txq);
extern int dev_forward_skb(struct net_device *dev,
                           struct sk_buff *skb);

extern int netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
        this_cpu_dec(*dev->pcpu_refcnt);
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
        this_cpu_inc(*dev->pcpu_refcnt);
}

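/* Example (illustrative sketch only): taking a reference while a pointer to
 * the device is stashed for deferred work, and dropping it when the work has
 * run.  "struct foo_work" and its functions are hypothetical.
 */
#if 0	/* example only */
struct foo_work {
        struct work_struct work;
        struct net_device *dev;
};

static void foo_defer_work(struct foo_work *w, struct net_device *dev)
{
        dev_hold(dev);		/* keep dev alive until the work runs */
        w->dev = dev;
        schedule_work(&w->work);
}

static void foo_work_fn(struct work_struct *work)
{
        struct foo_work *w = container_of(work, struct foo_work, work);

        /* ... use w->dev ... */
        dev_put(w->dev);	/* release; dev may now be freed */
}
#endif
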
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller who is
 * responsible for serialization of these calls.
 *
 * The name 'carrier' is inappropriate; these functions should really be
 * called netif_lowerlayer_*(), because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

extern void linkwatch_init_dev(struct net_device *dev);
extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);

/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device
 */
static inline bool netif_carrier_ok(const struct net_device *dev)
{
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

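/* Example (illustrative sketch only): a link-status interrupt reporting
 * carrier changes to the stack.  Both calls are safe from IRQ context; the
 * caller serializes them, per the comment above.  "foo_phy_link_up" is a
 * hypothetical PHY query helper.
 */
#if 0	/* example only */
static void foo_link_change(struct net_device *dev)
{
        if (foo_phy_link_up(dev))	/* hypothetical */
                netif_carrier_on(dev);
        else
                netif_carrier_off(dev);
}
#endif
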
/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event. For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - set device as not dormant.
 * @dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if the device is in dormant state.
 */
static inline bool netif_dormant(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


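/* Example (illustrative sketch only): an on-demand (dial-up style)
 * interface going dormant while it waits for an external trigger, then
 * clearing the state once the link is negotiated.  "foo" names are
 * hypothetical.
 */
#if 0	/* example only */
static void foo_wait_for_peer(struct net_device *dev)
{
        netif_dormant_on(dev);	/* up, but cannot pass packets yet */
        /* ... negotiate with the remote end ... */
        netif_dormant_off(dev);	/* operstate may now become IF_OPER_UP */
}
#endif
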
/**
 * netif_oper_up - test if device is operational
 * @dev: network device
 *
 * Check if carrier is operational
 */
static inline bool netif_oper_up(const struct net_device *dev)
{
        return (dev->operstate == IF_OPER_UP ||
                dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 * netif_device_present - is device available or removed
 * @dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline bool netif_device_present(struct net_device *dev)
{
        return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */

enum {
        NETIF_MSG_DRV		= 0x0001,
        NETIF_MSG_PROBE		= 0x0002,
        NETIF_MSG_LINK		= 0x0004,
        NETIF_MSG_TIMER		= 0x0008,
        NETIF_MSG_IFDOWN	= 0x0010,
        NETIF_MSG_IFUP		= 0x0020,
        NETIF_MSG_RX_ERR	= 0x0040,
        NETIF_MSG_TX_ERR	= 0x0080,
        NETIF_MSG_TX_QUEUED	= 0x0100,
        NETIF_MSG_INTR		= 0x0200,
        NETIF_MSG_TX_DONE	= 0x0400,
        NETIF_MSG_RX_STATUS	= 0x0800,
        NETIF_MSG_PKTDATA	= 0x1000,
        NETIF_MSG_HW		= 0x2000,
        NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
        /* use default */
        if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
                return default_msg_enable_bits;
        if (debug_value == 0)	/* no output */
                return 0;
        /* set low N bits */
        return (1 << debug_value) - 1;
}

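/* Example (illustrative sketch only): a driver mapping a module parameter
 * onto its msg_enable bitmask.  netif_msg_init(4, ...) sets the low four
 * bits, i.e. NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
 * NETIF_MSG_TIMER.  "struct foo_priv" (with a u32 msg_enable field) is a
 * hypothetical driver-private structure.
 */
#if 0	/* example only */
static int debug = -1;			/* -1 == use driver defaults */
module_param(debug, int, 0);

static void foo_init_msg_level(struct foo_priv *priv)
{
        priv->msg_enable = netif_msg_init(debug,
                                          NETIF_MSG_DRV | NETIF_MSG_LINK);
}
#endif
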
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
        spin_lock(&txq->_xmit_lock);
        txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
        spin_lock_bh(&txq->_xmit_lock);
        txq->xmit_lock_owner = smp_processor_id();
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
        bool ok = spin_trylock(&txq->_xmit_lock);
        if (likely(ok))
                txq->xmit_lock_owner = smp_processor_id();
        return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
        txq->xmit_lock_owner = -1;
        spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
        txq->xmit_lock_owner = -1;
        spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
        if (txq->xmit_lock_owner != -1)
                txq->trans_start = jiffies;
}

/**
 * netif_tx_lock - grab network device transmit lock
 * @dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        spin_lock(&dev->tx_global_lock);
        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                /* We are the only thread of execution doing a
                 * freeze, but we have to grab the _xmit_lock in
                 * order to synchronize with threads which are in
                 * the ->hard_start_xmit() handler and already
                 * checked the frozen bit.
                 */
                __netif_tx_lock(txq, cpu);
                set_bit(__QUEUE_STATE_FROZEN, &txq->state);
                __netif_tx_unlock(txq);
        }
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
        local_bh_disable();
        netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                /* No need to grab the _xmit_lock here. If the
                 * queue is not stopped for another reason, we
                 * force a schedule.
                 */
                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
                netif_schedule_queue(txq);
        }
        spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
        netif_tx_unlock(dev);
        local_bh_enable();
}

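/* Example (illustrative sketch only): freezing every TX queue around a
 * device reconfiguration from process context, using the BH-disabling
 * variants.  "foo_reconfigure" is a hypothetical driver routine.
 */
#if 0	/* example only */
static void foo_reconfigure(struct net_device *dev)
{
        netif_tx_lock_bh(dev);	/* freeze all TX queues */
        /* ... rewrite ring/geometry state the xmit path looks at ... */
        netif_tx_unlock_bh(dev);	/* thaw and reschedule the queues */
}
#endif
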
#define HARD_TX_LOCK(dev, txq, cpu) {			\
        if ((dev->features & NETIF_F_LLTX) == 0) {	\
                __netif_tx_lock(txq, cpu);		\
        }						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
        if ((dev->features & NETIF_F_LLTX) == 0) {	\
                __netif_tx_unlock(txq);			\
        }						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
        unsigned int i;
        int cpu;

        local_bh_disable();
        cpu = smp_processor_id();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                __netif_tx_lock(txq, cpu);
                netif_tx_stop_queue(txq);
                __netif_tx_unlock(txq);
        }
        local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
        spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_nested(struct net_device *dev)
{
        spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
        spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
        spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
        spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
        list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)

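/* Example (illustrative sketch only): read-only walk of a device's address
 * list under rcu_read_lock(), as the comment above requires.
 * "foo_dump_addrs" is a hypothetical name.
 */
#if 0	/* example only */
static void foo_dump_addrs(struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        rcu_read_lock();
        for_each_dev_addr(dev, ha)
                pr_info("%s: addr %pM type %d\n",
                        dev->name, ha->addr, ha->type);
        rcu_read_unlock();
}
#endif
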
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                                           void (*setup)(struct net_device *),
                                           unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
        alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)

#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
        alloc_netdev_mqs(sizeof_priv, name, setup, count, count)

extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);

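/* Example (illustrative sketch only): the usual allocate/register sequence
 * for an Ethernet-style driver with one queue of each kind.  "struct
 * foo_priv" and the "foo%d" naming template are assumptions for
 * illustration.
 */
#if 0	/* example only */
static struct net_device *foo_create(void)
{
        struct net_device *dev;
        int err;

        dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
        if (!dev)
                return NULL;
        err = register_netdev(dev);	/* takes rtnl_lock internally */
        if (err) {
                free_netdev(dev);
                return NULL;
        }
        return dev;
}
#endif
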
/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
                                  struct netdev_hw_addr_list *from_list,
                                  int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
                                   struct netdev_hw_addr_list *from_list,
                                   int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
                          struct netdev_hw_addr_list *from_list,
                          int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
                             struct netdev_hw_addr_list *from_list,
                             int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);

/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
                        unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                        unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
                                 struct net_device *from_dev,
                                 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
                                 struct net_device *from_dev,
                                 unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);

/* Functions used for unicast addresses handling */
extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);

/* Functions used for multicast addresses handling */
extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);

/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern void netdev_notify_peers(struct net_device *dev);
extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
                                               struct rtnl_link_stats64 *storage);
extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
                                    const struct net_device_stats *netdev_stats);

extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int bpf_jit_enable;

extern bool netdev_has_upper_dev(struct net_device *dev,
                                 struct net_device *upper_dev);
extern bool netdev_has_any_upper_dev(struct net_device *dev);
extern struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
                                                            struct list_head **iter);

/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
        for (iter = &(dev)->all_adj_list.upper, \
             updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
             updev; \
             updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))

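/* Example (illustrative sketch only): walking every upper device (bonds,
 * bridges, VLANs stacked on top) under the required RCU read lock.
 * "foo_has_upper_named" is a hypothetical helper.
 */
#if 0	/* example only */
static bool foo_has_upper_named(struct net_device *dev, const char *name)
{
        struct net_device *upper;
        struct list_head *iter;
        bool found = false;

        rcu_read_lock();
        netdev_for_each_all_upper_dev_rcu(dev, upper, iter) {
                if (!strcmp(upper->name, name)) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}
#endif
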
extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
extern int netdev_upper_dev_link(struct net_device *dev,
                                 struct net_device *upper_dev);
extern int netdev_master_upper_dev_link(struct net_device *dev,
                                        struct net_device *upper_dev);
extern void netdev_upper_dev_unlink(struct net_device *dev,
                                    struct net_device *upper_dev);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                         netdev_features_t features, bool tx_path);
extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
                                           netdev_features_t features);

static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
        return __skb_gso_segment(skb, features, true);
}
__be16 skb_network_protocol(struct sk_buff *skb);

static inline bool can_checksum_protocol(netdev_features_t features,
                                         __be16 protocol)
{
        return ((features & NETIF_F_GEN_CSUM) ||
                ((features & NETIF_F_V4_CSUM) &&
                 protocol == htons(ETH_P_IP)) ||
                ((features & NETIF_F_V6_CSUM) &&
                 protocol == htons(ETH_P_IPV6)) ||
                ((features & NETIF_F_FCOE_CRC) &&
                 protocol == htons(ETH_P_FCOE)));
}

#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern struct kobj_ns_type_operations net_ns_type_operations;

extern const char *netdev_drivername(const struct net_device *dev);

extern void linkwatch_run_queue(void);

static inline netdev_features_t netdev_get_wanted_features(
        struct net_device *dev)
{
        return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
netdev_features_t netdev_increment_features(netdev_features_t all,
        netdev_features_t one, netdev_features_t mask);
/* Allow TSO to be used on stacked devices:
 * performing the GSO segmentation before the last device
 * is a performance improvement.
 */
static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
                                                        netdev_features_t mask)
{
        return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
}

int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                      struct net_device *dev);

netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
        netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;

        /* check flags correspondence */
        BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));

        return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
        return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
               (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
                                   netdev_features_t features)
{
        return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
                unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
                         (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
                                          unsigned int size)
{
        dev->gso_max_size = size;
}

static inline bool netif_is_bond_master(struct net_device *dev)
{
        return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(struct net_device *dev)
{
        return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
        return dev->priv_flags & IFF_SUPP_NOFCS;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
        if (dev->reg_state != NETREG_REGISTERED)
                return "(unregistered net_device)";
        return dev->name;
}

extern __printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
                  const char *format, ...);
extern __printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
extern __printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);

#define MODULE_ALIAS_NETDEV(device) \
        MODULE_ALIAS("netdev-" device)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
        dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
        netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
        if (0)							\
                netdev_printk(KERN_DEBUG, __dev, format, ##args); \
        0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
        if (0)							\
                netdev_printk(KERN_DEBUG, dev, format, ##args);	\
        0;							\
})
#endif

/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
        WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
3042
Joe Perchesb3d95c52010-02-09 11:49:49 +00003043/* netif printk helpers, similar to netdev_printk */
3044
3045#define netif_printk(priv, type, level, dev, fmt, args...) \
3046do { \
3047 if (netif_msg_##type(priv)) \
3048 netdev_printk(level, (dev), fmt, ##args); \
3049} while (0)
3050
Joe Perchesf45f4322010-06-27 01:02:36 +00003051#define netif_level(level, priv, type, dev, fmt, args...) \
3052do { \
3053 if (netif_msg_##type(priv)) \
3054 netdev_##level(dev, fmt, ##args); \
3055} while (0)
3056
Joe Perchesb3d95c52010-02-09 11:49:49 +00003057#define netif_emerg(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003058 netif_level(emerg, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003059#define netif_alert(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003060 netif_level(alert, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003061#define netif_crit(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003062 netif_level(crit, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003063#define netif_err(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003064 netif_level(err, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003065#define netif_warn(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003066 netif_level(warn, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003067#define netif_notice(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003068 netif_level(notice, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003069#define netif_info(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00003070 netif_level(info, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00003071
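/* Example (illustrative sketch only): messages gated by the driver's
 * msg_enable bits (see netif_msg_init() above).  "struct foo_priv" is
 * assumed to embed a u32 msg_enable field, as the netif_msg_##type()
 * macros require.
 */
#if 0	/* example only */
static void foo_report_link(struct foo_priv *priv, struct net_device *dev)
{
        if (netif_carrier_ok(dev))
                netif_info(priv, link, dev, "link up\n");
        else
                netif_err(priv, link, dev, "link down\n");
}
#endif
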
#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
        if (netif_msg_##type(priv))				\
                dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
        netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)		\
({								\
        if (0)							\
                netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
        0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
        if (0)							\
                netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
        0;							\
})
#endif

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16? Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 * NOTE: That is no longer true with the addition of VLAN tags. Not
 *       sure which should go first, but I bet it won't make much
 *       difference if we are running VLANs. The good news is that
 *       this protocol won't be in the list unless compiled in, so
 *       the average user (w/out VLANs) will not be adversely affected.
 *       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

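/* Example (illustrative sketch only): how a protocol value indexes a
 * PTYPE_HASH_SIZE-bucket table by its low nibble, as net/core/dev.c does
 * with its ptype lists.  E.g. ETH_P_IP (0x0800) hashes to bucket
 * 0x0800 & 15 == 0.  The table parameter here is an assumption for
 * illustration.
 */
#if 0	/* example only */
static struct list_head *foo_ptype_head(__be16 proto,
                                        struct list_head *ptype_base)
{
        return &ptype_base[ntohs(proto) & PTYPE_HASH_MASK];
}
#endif
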
#endif	/* _LINUX_NETDEVICE_H */