/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

extern void netdev_set_default_ethtool_ops(struct net_device *dev,
					   const struct ethtool_ops *ops);

/* hardware address assignment types */
#define NET_ADDR_PERM		0	/* address is permanent (default) */
#define NET_ADDR_RANDOM		1	/* address is generated randomly */
#define NET_ADDR_STOLEN		2	/* address is stolen from other device */
#define NET_ADDR_SET		3	/* address is set using
					 * dev_set_mac_address() */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in that case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
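
/*
 * Illustrative sketch (not part of this header): roughly how a caller of a
 * driver's ->ndo_start_xmit() can use dev_xmit_complete() to decide whether
 * the skb was consumed.  The surrounding code is hypothetical; only
 * dev_xmit_complete() and the NETDEV_TX_ / NET_XMIT_ codes come from this
 * header.
 *
 *	rc = dev->netdev_ops->ndo_start_xmit(skb, dev);
 *	if (dev_xmit_complete(rc))
 *		return rc;	// skb consumed: sent, dropped, or queued elsewhere
 *	// NETDEV_TX_BUSY / NETDEV_TX_LOCKED: the skb still belongs to us, requeue it
 */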

/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

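/*
 * Illustrative sketch (not part of this header): a driver's ndo_set_rx_mode()
 * walking the device's multicast list with the helpers above.  The hardware
 * programming calls are hypothetical; only the netdev_for_each_mc_addr()
 * helper and struct netdev_hw_addr come from this header.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		foo_hw_clear_mc_filter(dev);			// assumed hardware helper
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_hw_add_mc_filter(dev, ha->addr);	// assumed hardware helper
 *	}
 */
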
struct hh_cache {
	u16		hh_len;
	u16		__pad;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

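/*
 * Illustrative sketch (not part of this header): reserving link-layer
 * headroom when building an outgoing packet by hand.  alloc_skb() and
 * skb_reserve() live in <linux/skbuff.h>; the payload length is a
 * hypothetical value for the example.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	// link-layer headers pushed later (e.g. by skb_push()) now fit in front
 */
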
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they must not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used during device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned int		gro_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash */
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
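
/*
 * Illustrative sketch (not part of this header): the shape of a minimal
 * rx_handler, as registered with netdev_rx_handler_register().  The helper
 * names are hypothetical; only the rx_handler typedefs and return codes come
 * from this header.
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (foo_frame_is_ours(skb)) {	// assumed helper
 *			foo_queue_frame(skb);	// skb is now owned by us
 *			return RX_HANDLER_CONSUMED;
 *		}
 *		return RX_HANDLER_PASS;		// normal delivery
 *	}
 */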

extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
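
/*
 * Illustrative sketch (not part of this header): the usual NAPI pattern in a
 * driver's interrupt handler - mask device RX interrupts, then hand polling
 * over to NAPI.  The irq handler and register accesses are hypothetical; only
 * napi_schedule() comes from this header.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_disable_rx_irq(priv);	// assumed register write
 *		napi_schedule(&priv->napi);	// poll() will run soon
 *		return IRQ_HANDLED;
 *	}
 */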

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

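/*
 * Illustrative sketch (not part of this header): the matching poll() callback
 * a driver registers (typically via netif_napi_add()).  It processes at most
 * @budget packets and calls napi_complete() only once the queue is drained.
 * The rx-clean helper and the irq re-enable are hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int done = foo_clean_rx_ring(priv, budget);	// assumed helper
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			foo_enable_rx_irq(priv);	// assumed register write
 *		}
 *		return done;
 *	}
 */
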
/**
 *	napi_by_id - lookup a NAPI by napi_id
 *	@napi_id: hashed napi_id
 *
 * lookup @napi_id in napi_hash table
 * must be called under rcu_read_lock()
 */
extern struct napi_struct *napi_by_id(unsigned int napi_id);

/**
 *	napi_hash_add - add a NAPI to global hashtable
 *	@napi: napi context
 *
 * generate a new napi_id and store a @napi under it in napi_hash
 */
extern void napi_hash_add(struct napi_struct *napi);

/**
 *	napi_hash_del - remove a NAPI from global table
 *	@napi: napi context
 *
 * Warning: caller must observe rcu grace period
 * before freeing memory containing @napi
 */
extern void napi_hash_del(struct napi_struct *napi);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)		| \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF		| \
					(1 << __QUEUE_STATE_FROZEN))
};
/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state).  Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */

struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;

	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;
	u16 ents[0];
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;

		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();

		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (table && hash)
		table->ents[hash & table->mask] = RPS_NO_CPU;
}

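/*
 * Illustrative sketch (not part of this header): how the socket layer style
 * of use looks - recording the current CPU for a flow around recvmsg(), keyed
 * by the flow hash.  The surrounding lookup is an assumption for the example;
 * only rps_record_sock_flow() and rps_sock_flow_table come from this header.
 *
 *	rcu_read_lock();
 *	sft = rcu_dereference(rps_sock_flow_table);
 *	if (sft)
 *		rps_record_sock_flow(sft, sk->sk_rxhash);
 *	rcu_read_unlock();
 */
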
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
				u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
	struct kobject			kobj;
	struct net_device		*dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
    / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when the device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it should
 *	set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return a
 *	not supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set a network device's bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                      struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 * 	Called to setup 'tc' number of traffic classes in the net device. This
 * 	is always called from the stack with the rtnl lock held and netif tx
 * 	queues stopped. This allows the netdevice to perform queue management
 * 	safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev,  u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform necessary setup and returns 1 to indicate the device is set up
 *	successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information(FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS.  rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 *      Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *		netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 flags)
 *	Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr)
 *	Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, int idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask)
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
						     struct rtnl_link_stats64 *storage);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info,
						     gfp_t gfp);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_LL_RX_POLL
	int			(*ndo_ll_poll)(struct napi_struct *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 flags);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						int idx);

	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
};
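
/*
 * Illustrative sketch (not part of this header): the minimal set of hooks a
 * simple Ethernet driver typically wires up.  The foo_* functions are
 * hypothetical; the eth_* helpers are the generic ones from
 * <linux/etherdevice.h>, and the structure itself comes from this header.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,	// required
 *		.ndo_set_rx_mode	= foo_set_rx_mode,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *	};
 *
 *	// in the probe routine, before register_netdev():
 *	dev->netdev_ops = &foo_netdev_ops;
 */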

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	/* device name hash chain, please keep it close to name[] */
	struct hlist_node	name_hlist;

	/* snmp alias */
	char			*ifalias;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned long		state;

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	struct list_head	upper_dev_list; /* List of upper devices */


	/* currently active device features */
	netdev_features_t	features;
	/* user-changeable features */
	netdev_features_t	hw_features;
	/* user-requested features */
	netdev_features_t	wanted_features;
	/* mask of features inheritable by VLAN devices */
	netdev_features_t	vlan_features;
	/* mask of features inherited by encapsulating devices
	 * This field indicates what encapsulation offloads
	 * the hardware is capable of doing, and drivers will
	 * need to set them appropriately.
	 */
	netdev_features_t	hw_enc_features;
	/* mask of features inheritable by MPLS */
	netdev_features_t	mpls_features;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats	stats;
	atomic_long_t		rx_dropped; /* dropped packets by core network
					     * Do not use this in drivers.
					     */

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned int		priv_flags; /* Like 'flags' but invisible to userspace.
					     * See if.h for definitions. */
	unsigned short		gflags;
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_assign_type; /* hw address assignment type */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned char		neigh_priv_len;
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	struct netdev_hw_addr_list	dev_addrs; /* list of device
						    * hw addresses
						    */
#ifdef CONFIG_SYSFS
	struct kset		*queues_kset;
#endif

	bool			uc_promisc;
	unsigned int		promiscuity;
1191 unsigned int allmulti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193
1194 /* Protocol specific pointers */
Jesse Gross65ac6a52010-10-20 13:56:05 +00001195
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001196#if IS_ENABLED(CONFIG_VLAN_8021Q)
Jiri Pirko5b9ea6e2011-12-08 04:11:18 +00001197 struct vlan_info __rcu *vlan_info; /* VLAN info */
Jesse Gross65ac6a52010-10-20 13:56:05 +00001198#endif
Ben Hutchings34a430d2011-11-25 14:38:38 +00001199#if IS_ENABLED(CONFIG_NET_DSA)
Ben Hutchingscf50dcc2011-11-25 14:32:52 +00001200 struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00001201#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 void *atalk_ptr; /* AppleTalk link */
Eric Dumazet95ae6b22010-09-15 04:04:31 +00001203 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
Eric Dumazetfc766e4c2010-10-29 03:09:24 +00001204 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
Eric Dumazet198caec2010-10-24 21:32:05 +00001205 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 void *ax25_ptr; /* AX.25 specific data */
Johannes Berg704232c2007-04-23 12:20:05 -07001207 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
1208 assign before registering */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001210/*
Eric Dumazetcd135392010-09-16 02:58:13 +00001211 * Cache lines mostly used on receive path (including eth_type_trans())
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001212 */
Eric Dumazet4dc89132010-08-31 07:40:16 +00001213 unsigned long last_rx; /* Time of last Rx
1214 * This should not be set in
1215 * drivers, unless really needed,
 1216	 * because the network stack (bonding)
 1217	 * uses it if/when necessary, to
1218 * avoid dirtying this cache line.
1219 */
1220
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001221 /* Interface address info used in eth_type_trans() */
Jiri Pirkof001fde2009-05-05 02:48:28 +00001222 unsigned char *dev_addr; /* hw address, (before bcast
1223 because most packets are
1224 unicast) */
1225
Tom Herbert0a9627f2010-03-16 08:03:29 +00001226
david decotignyccf5ff62011-11-16 12:15:10 +00001227#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +00001228 struct netdev_rx_queue *_rx;
1229
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001230 /* Number of RX queues allocated at register_netdev() time */
Tom Herbert0a9627f2010-03-16 08:03:29 +00001231 unsigned int num_rx_queues;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001232
1233 /* Number of RX queues currently active in device */
1234 unsigned int real_num_rx_queues;
Ben Hutchingsc4454772011-01-19 11:03:53 +00001235
Eric Dumazetdf334542010-03-24 19:13:54 +00001236#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00001237
stephen hemminger61391cd2010-11-15 06:38:12 +00001238 rx_handler_func_t __rcu *rx_handler;
1239 void __rcu *rx_handler_data;
David S. Millere8a04642008-07-17 00:34:19 -07001240
Eric Dumazet24824a02010-10-02 06:11:55 +00001241 struct netdev_queue __rcu *ingress_queue;
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001242	unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast address */
1243
Eric Dumazetcd135392010-09-16 02:58:13 +00001244
1245/*
1246 * Cache lines mostly used on transmit path
1247 */
David S. Millere8a04642008-07-17 00:34:19 -07001248 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001249
1250 /* Number of TX queues allocated at alloc_netdev_mq() time */
David S. Millere8a04642008-07-17 00:34:19 -07001251 unsigned int num_tx_queues;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001252
1253 /* Number of TX queues currently active in device */
1254 unsigned int real_num_tx_queues;
1255
Patrick McHardyaf356af2009-09-04 06:41:18 +00001256 /* root qdisc from userspace point of view */
1257 struct Qdisc *qdisc;
1258
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 unsigned long tx_queue_len; /* Max frames per queue allowed */
David S. Millerc3f26a22008-07-31 16:58:50 -07001260 spinlock_t tx_global_lock;
Eric Dumazetcd135392010-09-16 02:58:13 +00001261
Tom Herbertbf264142010-11-26 08:36:09 +00001262#ifdef CONFIG_XPS
Eric Dumazeta4177862010-11-28 21:43:02 +00001263 struct xps_dev_maps __rcu *xps_maps;
Tom Herbertbf264142010-11-26 08:36:09 +00001264#endif
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001265#ifdef CONFIG_RFS_ACCEL
1266 /* CPU reverse-mapping for RX completion interrupts, indexed
1267 * by RX queue number. Assigned by driver. This must only be
1268 * set if the ndo_rx_flow_steer operation is defined. */
1269 struct cpu_rmap *rx_cpu_rmap;
1270#endif
Tom Herbert1d24eb42010-11-21 13:17:27 +00001271
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001272 /* These may be needed for future network-power-down code. */
Eric Dumazet9d214932009-05-17 20:55:16 -07001273
1274 /*
1275 * trans_start here is expensive for high speed devices on SMP,
1276 * please use netdev_queue->trans_start instead.
1277 */
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001278 unsigned long trans_start; /* Time (in jiffies) of last Tx */
1279
1280 int watchdog_timeo; /* used by dev_watchdog() */
1281 struct timer_list watchdog_timer;
1282
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 /* Number of references to this device */
Eric Dumazet29b44332010-10-11 10:22:12 +00001284 int __percpu *pcpu_refcnt;
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001285
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 /* delayed register/unregister */
1287 struct list_head todo_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288 /* device index hash chain */
1289 struct hlist_node index_hlist;
1290
Eric Dumazete014deb2009-11-17 05:59:21 +00001291 struct list_head link_watch_list;
Herbert Xu572a1032007-05-08 18:34:17 -07001292
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 /* register/unregister state machine */
1294 enum { NETREG_UNINITIALIZED=0,
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07001295 NETREG_REGISTERED, /* completed register_netdevice */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 NETREG_UNREGISTERING, /* called unregister_netdevice */
1297 NETREG_UNREGISTERED, /* completed unregister todo */
1298 NETREG_RELEASED, /* called free_netdev */
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001299 NETREG_DUMMY, /* dummy device for NAPI poll */
Eric Dumazet449f4542011-05-19 12:24:16 +00001300 } reg_state:8;
1301
 1302	bool dismantle; /* device is going to be freed */
Patrick McHardya2835762010-02-26 06:34:51 +00001303
1304 enum {
1305 RTNL_LINK_INITIALIZED,
1306 RTNL_LINK_INITIALIZING,
1307 } rtnl_link_state:16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001309 /* Called from unregister, can be used to call free_netdev */
1310 void (*destructor)(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312#ifdef CONFIG_NETPOLL
Cong Wang5fbee842013-01-22 21:29:39 +00001313 struct netpoll_info __rcu *npinfo;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314#endif
David S. Millereae792b2008-07-15 03:03:33 -07001315
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001316#ifdef CONFIG_NET_NS
Eric W. Biederman4a1c5372007-09-12 11:56:32 +02001317 /* Network namespace this network device is inside */
1318 struct net *nd_net;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001319#endif
Eric W. Biederman4a1c5372007-09-12 11:56:32 +02001320
David S. Miller49517042008-05-12 03:29:11 -07001321 /* mid-layer private */
Eric Dumazeta7855c72010-09-23 23:51:51 +00001322 union {
1323 void *ml_priv;
1324 struct pcpu_lstats __percpu *lstats; /* loopback stats */
Eric Dumazet290b8952010-09-27 00:33:35 +00001325 struct pcpu_tstats __percpu *tstats; /* tunnel stats */
Eric Dumazet6d81f412010-09-27 20:50:33 +00001326 struct pcpu_dstats __percpu *dstats; /* dummy stats */
Eric Dumazet26811282012-12-29 16:02:43 +00001327 struct pcpu_vstats __percpu *vstats; /* veth stats */
Eric Dumazeta7855c72010-09-23 23:51:51 +00001328 };
Patrick McHardyeca9eba2008-07-05 21:26:13 -07001329 /* GARP */
Eric Dumazet3cc77ec2010-10-24 21:32:36 +00001330 struct garp_port __rcu *garp_port;
David Wardfebf0182013-02-08 17:17:06 +00001331 /* MRP */
1332 struct mrp_port __rcu *mrp_port;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 /* class/net/name entry */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001335 struct device dev;
Eric W. Biederman0c509a62009-10-29 14:18:21 +00001336 /* space for optional device, statistics, and wireless sysfs groups */
1337 const struct attribute_group *sysfs_groups[4];
Patrick McHardy38f7b872007-06-13 12:03:51 -07001338
1339 /* rtnetlink link ops */
1340 const struct rtnl_link_ops *rtnl_link_ops;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001341
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001342 /* for setting kernel sock attribute on TCP connection setup */
1343#define GSO_MAX_SIZE 65536
1344 unsigned int gso_max_size;
Ben Hutchings30b678d2012-07-30 15:57:00 +00001345#define GSO_MAX_SEGS 65535
1346 u16 gso_max_segs;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001347
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08001348#ifdef CONFIG_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08001349 /* Data Center Bridging netlink ops */
Stephen Hemminger32953542009-10-05 06:01:03 +00001350 const struct dcbnl_rtnl_ops *dcbnl_ops;
Alexander Duyck2f90b862008-11-20 20:52:10 -08001351#endif
John Fastabend4f57c082011-01-17 08:06:04 +00001352 u8 num_tc;
1353 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1354 u8 prio_tc_map[TC_BITMASK + 1];
Alexander Duyck2f90b862008-11-20 20:52:10 -08001355
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001356#if IS_ENABLED(CONFIG_FCOE)
Yi Zou4d288d52009-02-27 14:06:59 -08001357 /* max exchange id for FCoE LRO by ddp */
1358 unsigned int fcoe_ddp_xid;
1359#endif
Neil Horman5bc14212011-11-22 05:10:51 +00001360#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1361 struct netprio_map __rcu *priomap;
1362#endif
Richard Cochranc1f19b52010-07-17 08:49:36 +00001363 /* phy device may attach itself for hardware timestamping */
1364 struct phy_device *phydev;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00001365
Eric Dumazet23d3b8b2012-09-05 01:02:56 +00001366 struct lock_class_key *qdisc_tx_busylock;
1367
Vlad Dogarucbda10f2011-01-13 23:38:30 +00001368 /* group the device belongs to */
1369 int group;
Eric Dumazet91364612012-06-11 06:36:13 +00001370
1371 struct pm_qos_request pm_qos_req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372};
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001373#define to_net_dev(d) container_of(d, struct net_device, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374
1375#define NETDEV_ALIGN 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376
David S. Millere8a04642008-07-17 00:34:19 -07001377static inline
John Fastabend4f57c082011-01-17 08:06:04 +00001378int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1379{
1380 return dev->prio_tc_map[prio & TC_BITMASK];
1381}
1382
1383static inline
1384int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1385{
1386 if (tc >= dev->num_tc)
1387 return -EINVAL;
1388
1389 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1390 return 0;
1391}
1392
1393static inline
1394void netdev_reset_tc(struct net_device *dev)
1395{
1396 dev->num_tc = 0;
1397 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1398 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1399}
1400
1401static inline
1402int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1403{
1404 if (tc >= dev->num_tc)
1405 return -EINVAL;
1406
1407 dev->tc_to_txq[tc].count = count;
1408 dev->tc_to_txq[tc].offset = offset;
1409 return 0;
1410}
1411
1412static inline
1413int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1414{
1415 if (num_tc > TC_MAX_QUEUE)
1416 return -EINVAL;
1417
1418 dev->num_tc = num_tc;
1419 return 0;
1420}
1421
1422static inline
1423int netdev_get_num_tc(struct net_device *dev)
1424{
1425 return dev->num_tc;
1426}
1427
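/*
 * Illustrative sketch (not part of this header): a hypothetical multiqueue
 * driver "foo" splitting its 8 TX queues into two traffic classes with the
 * helpers above.  The queue counts and the priority mapping are made up.
 *
 *	static int foo_setup_tc(struct net_device *dev)
 *	{
 *		int prio, err;
 *
 *		err = netdev_set_num_tc(dev, 2);
 *		if (err)
 *			return err;
 *
 *		// TC 0 owns queues 0..3, TC 1 owns queues 4..7
 *		netdev_set_tc_queue(dev, 0, 4, 0);
 *		netdev_set_tc_queue(dev, 1, 4, 4);
 *
 *		// low priorities map to TC 0, high priorities to TC 1
 *		for (prio = 0; prio <= TC_BITMASK; prio++)
 *			netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
 *		return 0;
 *	}
 */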
1428static inline
David S. Millere8a04642008-07-17 00:34:19 -07001429struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1430 unsigned int index)
1431{
1432 return &dev->_tx[index];
1433}
1434
1435static inline void netdev_for_each_tx_queue(struct net_device *dev,
1436 void (*f)(struct net_device *,
1437 struct netdev_queue *,
1438 void *),
1439 void *arg)
1440{
1441 unsigned int i;
1442
1443 for (i = 0; i < dev->num_tx_queues; i++)
1444 f(dev, &dev->_tx[i], arg);
1445}
1446
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00001447extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1448 struct sk_buff *skb);
Alexander Duyck416186f2013-01-10 08:56:51 +00001449extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00001450
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001451/*
1452 * Net namespace inlines
1453 */
1454static inline
1455struct net *dev_net(const struct net_device *dev)
1456{
Eric Dumazetc2d9ba92010-06-01 06:51:19 +00001457 return read_pnet(&dev->nd_net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001458}
1459
1460static inline
Denis V. Lunevf5aa23f2008-03-26 00:48:17 -07001461void dev_net_set(struct net_device *dev, struct net *net)
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001462{
1463#ifdef CONFIG_NET_NS
Denis V. Lunevf3005d72008-04-16 02:02:18 -07001464 release_net(dev->nd_net);
1465 dev->nd_net = hold_net(net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001466#endif
1467}
1468
Lennert Buytenhekcf85d082008-10-07 13:45:02 +00001469static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1470{
1471#ifdef CONFIG_NET_DSA_TAG_DSA
1472 if (dev->dsa_ptr != NULL)
1473 return dsa_uses_dsa_tags(dev->dsa_ptr);
1474#endif
1475
1476 return 0;
1477}
1478
Lennert Buytenhek396138f2008-10-07 13:46:07 +00001479static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1480{
1481#ifdef CONFIG_NET_DSA_TAG_TRAILER
1482 if (dev->dsa_ptr != NULL)
1483 return dsa_uses_trailer_tags(dev->dsa_ptr);
1484#endif
1485
1486 return 0;
1487}
1488
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001489/**
1490 * netdev_priv - access network device private data
1491 * @dev: network device
1492 *
1493 * Get network device private data
1494 */
Patrick McHardy6472ce62007-06-13 12:03:21 -07001495static inline void *netdev_priv(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496{
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00001497 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498}
1499
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500/* Set the sysfs physical device reference for the network logical device.
 1501 * If set prior to registration, it will cause a symlink to be created during initialization.
1502 */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07001503#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504
Marcel Holtmann384912e2009-08-31 21:08:19 +00001505/* Set the sysfs device type for the network logical device to allow
 1506 * fine grained identification of different network device types. For
 1507 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
1508 */
1509#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1510
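/*
 * Illustrative sketch (not part of this header): typical probe-time use of
 * netdev_priv(), SET_NETDEV_DEV() and register_netdev() by a hypothetical
 * PCI Ethernet driver "foo".  The private structure layout is made up.
 *
 *	struct foo_priv {
 *		void __iomem *regs;
 *		// ... ring pointers, stats, locks ...
 *	};
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		struct net_device *dev;
 *		struct foo_priv *priv;
 *
 *		dev = alloc_etherdev(sizeof(*priv));
 *		if (!dev)
 *			return -ENOMEM;
 *
 *		priv = netdev_priv(dev);		// area allocated right after net_device
 *		SET_NETDEV_DEV(dev, &pdev->dev);	// parent for the sysfs "device" symlink
 *		// ... map BARs, fill priv, set dev->netdev_ops ...
 *		return register_netdev(dev);
 *	}
 */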
Eric Dumazet82dc3c63c2013-03-05 15:57:22 +00001511/* Default NAPI poll() weight
 1512 * Device drivers are strongly advised not to use a bigger value
1513 */
1514#define NAPI_POLL_WEIGHT 64
1515
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07001516/**
1517 * netif_napi_add - initialize a napi context
1518 * @dev: network device
1519 * @napi: napi context
1520 * @poll: polling function
1521 * @weight: default weight
1522 *
1523 * netif_napi_add() must be used to initialize a napi context prior to calling
1524 * *any* of the other napi related functions.
1525 */
Herbert Xud565b0a2008-12-15 23:38:52 -08001526void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1527 int (*poll)(struct napi_struct *, int), int weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001528
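/*
 * Illustrative sketch (not part of this header): a minimal NAPI poll loop in
 * a hypothetical driver "foo".  foo_rx_clean(), foo_enable_irq() and the napi
 * member placement are assumptions, not kernel API.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int done;
 *
 *		done = foo_rx_clean(priv, budget);	// hand skbs to napi_gro_receive()
 *		if (done < budget) {
 *			napi_complete(napi);		// all work done, re-enable interrupts
 *			foo_enable_irq(priv);
 *		}
 *		return done;
 *	}
 *
 *	// at probe time:
 *	//	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 */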
Alexander Duyckd8156532008-07-08 15:13:05 -07001529/**
1530 * netif_napi_del - remove a napi context
1531 * @napi: napi context
1532 *
1533 * netif_napi_del() removes a napi context from the network device napi list
1534 */
Herbert Xud565b0a2008-12-15 23:38:52 -08001535void netif_napi_del(struct napi_struct *napi);
1536
1537struct napi_gro_cb {
Herbert Xu78a478d2009-05-26 18:50:21 +00001538 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1539 void *frag0;
1540
Herbert Xu74895942009-05-26 18:50:27 +00001541 /* Length of frag0. */
1542 unsigned int frag0_len;
1543
Herbert Xu86911732009-01-29 14:19:50 +00001544 /* This indicates where we are processing relative to skb->data. */
1545 int data_offset;
1546
Herbert Xud565b0a2008-12-15 23:38:52 -08001547 /* This is non-zero if the packet cannot be merged with the new skb. */
1548 int flush;
1549
1550 /* Number of segments aggregated. */
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001551 u16 count;
1552
1553 /* This is non-zero if the packet may be of the same flow. */
1554 u8 same_flow;
Herbert Xu5d38a072009-01-04 16:13:40 -08001555
1556 /* Free the skb? */
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001557 u8 free;
Eric Dumazetd7e88832012-04-30 08:10:34 +00001558#define NAPI_GRO_FREE 1
1559#define NAPI_GRO_FREE_STOLEN_HEAD 2
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00001560
1561 /* jiffies when first packet was created/queued */
1562 unsigned long age;
Eric Dumazet86347242012-10-08 21:38:50 +02001563
1564 /* Used in ipv6_gro_receive() */
1565 int proto;
Eric Dumazetc3c7c252012-12-06 13:54:59 +00001566
1567 /* used in skb_gro_receive() slow path */
1568 struct sk_buff *last;
Herbert Xud565b0a2008-12-15 23:38:52 -08001569};
1570
1571#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
Alexander Duyckd8156532008-07-08 15:13:05 -07001572
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573struct packet_type {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001574 __be16 type; /* This is really htons(ether_type). */
1575 struct net_device *dev; /* NULL is wildcarded here */
1576 int (*func) (struct sk_buff *,
1577 struct net_device *,
1578 struct packet_type *,
1579 struct net_device *);
Eric Leblondc0de08d2012-08-16 22:02:58 +00001580 bool (*id_match)(struct packet_type *ptype,
1581 struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 void *af_packet_priv;
1583 struct list_head list;
1584};
1585
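/*
 * Illustrative sketch (not part of this header): registering a packet_type
 * to tap a hypothetical ethertype.  ETH_P_FOO and foo_rcv() are made up;
 * dev_add_pack()/dev_remove_pack() are declared further down in this file.
 *
 *	static int foo_rcv(struct sk_buff *skb, struct net_device *dev,
 *			   struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// ... inspect the frame ...
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type foo_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.func = foo_rcv,
 *	};
 *
 *	// module init/exit:
 *	//	dev_add_pack(&foo_packet_type);
 *	//	dev_remove_pack(&foo_packet_type);
 */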
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00001586struct offload_callbacks {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1588 netdev_features_t features);
1589 int (*gso_send_check)(struct sk_buff *skb);
1590 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1591 struct sk_buff *skb);
1592 int (*gro_complete)(struct sk_buff *skb);
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00001593};
1594
1595struct packet_offload {
1596 __be16 type; /* This is really htons(ether_type). */
1597 struct offload_callbacks callbacks;
1598 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599};
1600
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601#include <linux/notifier.h>
1602
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001603/* netdevice notifier chain. Please remember to update the rtnetlink
1604 * notification exclusion list in rtnetlink_event() when adding new
1605 * types.
1606 */
1607#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
1608#define NETDEV_DOWN 0x0002
1609#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
1610 detected a hardware crash and restarted
1611 - we can use this eg to kick tcp sessions
1612 once done */
1613#define NETDEV_CHANGE 0x0004 /* Notify device state change */
1614#define NETDEV_REGISTER 0x0005
1615#define NETDEV_UNREGISTER 0x0006
1616#define NETDEV_CHANGEMTU 0x0007
1617#define NETDEV_CHANGEADDR 0x0008
1618#define NETDEV_GOING_DOWN 0x0009
1619#define NETDEV_CHANGENAME 0x000A
1620#define NETDEV_FEAT_CHANGE 0x000B
1621#define NETDEV_BONDING_FAILOVER 0x000C
1622#define NETDEV_PRE_UP 0x000D
1623#define NETDEV_PRE_TYPE_CHANGE 0x000E
1624#define NETDEV_POST_TYPE_CHANGE 0x000F
1625#define NETDEV_POST_INIT 0x0010
Eric Dumazet0115e8e2012-08-22 17:19:46 +00001626#define NETDEV_UNREGISTER_FINAL 0x0011
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001627#define NETDEV_RELEASE 0x0012
1628#define NETDEV_NOTIFY_PEERS 0x0013
1629#define NETDEV_JOIN 0x0014
Jiri Pirko42e52bf2013-05-25 04:12:10 +00001630#define NETDEV_CHANGEUPPER 0x0015
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001631
1632extern int register_netdevice_notifier(struct notifier_block *nb);
1633extern int unregister_netdevice_notifier(struct notifier_block *nb);
Jiri Pirko351638e2013-05-28 01:30:21 +00001634
1635struct netdev_notifier_info {
1636 struct net_device *dev;
1637};
1638
Jiri Pirkobe9efd32013-05-28 01:30:22 +00001639struct netdev_notifier_change_info {
1640 struct netdev_notifier_info info; /* must be first */
1641 unsigned int flags_changed;
1642};
1643
Cong Wang75538c22013-05-29 11:30:50 +08001644static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
1645 struct net_device *dev)
1646{
1647 info->dev = dev;
1648}
1649
Jiri Pirko351638e2013-05-28 01:30:21 +00001650static inline struct net_device *
1651netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
1652{
1653 return info->dev;
1654}
1655
1656extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
1657 struct netdev_notifier_info *info);
Amerigo Wangdcfe1422011-07-25 17:13:09 -07001658extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
1659
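/*
 * Illustrative sketch (not part of this header): a netdevice notifier that
 * reacts to interfaces coming up.  The "foo" handler name is hypothetical;
 * the device is recovered from the opaque pointer with
 * netdev_notifier_info_to_dev().
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_netdev_notifier = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	//	register_netdevice_notifier(&foo_netdev_notifier);
 */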
1660
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661extern rwlock_t dev_base_lock; /* Device list lock */
1662
Eric Dumazet30e6c9fa2012-12-20 17:25:08 +00001663extern seqcount_t devnet_rename_seq; /* Device rename seq */
Brian Haleyc91f6df2012-11-26 05:21:08 +00001664
Eric W. Biederman881d9662007-09-17 11:56:21 -07001665
1666#define for_each_netdev(net, d) \
1667 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
Eric W. Biedermandcbccbd42009-11-29 22:25:26 +00001668#define for_each_netdev_reverse(net, d) \
1669 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08001670#define for_each_netdev_rcu(net, d) \
1671 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
Eric W. Biederman881d9662007-09-17 11:56:21 -07001672#define for_each_netdev_safe(net, d, n) \
1673 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1674#define for_each_netdev_continue(net, d) \
1675 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
stephen hemminger254245d2009-11-10 07:54:47 +00001676#define for_each_netdev_continue_rcu(net, d) \
1677 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
nikolay@redhat.com8a7fbfa2013-03-12 02:49:01 +00001678#define for_each_netdev_in_bond_rcu(bond, slave) \
1679 for_each_netdev_rcu(&init_net, slave) \
1680 if (netdev_master_upper_dev_get_rcu(slave) == bond)
Pavel Emelianov7562f872007-05-03 15:13:45 -07001681#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
1682
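/*
 * Illustrative sketch (not part of this header): walking the device list of
 * a namespace under RCU.  Plain for_each_netdev() requires RTNL or
 * dev_base_lock instead of rcu_read_lock().
 *
 *	static void foo_dump_devices(struct net *net)
 *	{
 *		struct net_device *dev;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
 *		rcu_read_unlock();
 *	}
 */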
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001683static inline struct net_device *next_net_device(struct net_device *dev)
1684{
1685 struct list_head *lh;
1686 struct net *net;
Pavel Emelianov7562f872007-05-03 15:13:45 -07001687
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001688 net = dev_net(dev);
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001689 lh = dev->dev_list.next;
1690 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1691}
1692
Eric Dumazetce81b762009-11-11 17:34:30 +00001693static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1694{
1695 struct list_head *lh;
1696 struct net *net;
1697
1698 net = dev_net(dev);
Eric Dumazetccf43432011-01-26 18:08:02 +00001699 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
Eric Dumazetce81b762009-11-11 17:34:30 +00001700 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1701}
1702
Daniel Lezcanoa050c332007-09-12 14:57:09 +02001703static inline struct net_device *first_net_device(struct net *net)
1704{
1705 return list_empty(&net->dev_base_head) ? NULL :
1706 net_device_entry(net->dev_base_head.next);
1707}
Pavel Emelianov7562f872007-05-03 15:13:45 -07001708
Eric Dumazetccf43432011-01-26 18:08:02 +00001709static inline struct net_device *first_net_device_rcu(struct net *net)
1710{
1711 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
1712
1713 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1714}
1715
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716extern int netdev_boot_setup_check(struct net_device *dev);
1717extern unsigned long netdev_boot_base(const char *prefix, int unit);
Eric Dumazet941666c2010-12-05 01:23:53 +00001718extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1719 const char *hwaddr);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001720extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1721extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722extern void dev_add_pack(struct packet_type *pt);
1723extern void dev_remove_pack(struct packet_type *pt);
1724extern void __dev_remove_pack(struct packet_type *pt);
Vlad Yasevich62532da2012-11-15 08:49:10 +00001725extern void dev_add_offload(struct packet_offload *po);
1726extern void dev_remove_offload(struct packet_offload *po);
1727extern void __dev_remove_offload(struct packet_offload *po);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728
Eric Dumazetbb69ae02010-06-07 11:42:13 +00001729extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1730 unsigned short mask);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001731extern struct net_device *dev_get_by_name(struct net *net, const char *name);
Eric Dumazet72c95282009-10-30 07:11:27 +00001732extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001733extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734extern int dev_alloc_name(struct net_device *dev, const char *name);
1735extern int dev_open(struct net_device *dev);
1736extern int dev_close(struct net_device *dev);
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001737extern void dev_disable_lro(struct net_device *dev);
Michel Machado95603e22012-06-12 10:16:35 +00001738extern int dev_loopback_xmit(struct sk_buff *newskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739extern int dev_queue_xmit(struct sk_buff *skb);
1740extern int register_netdevice(struct net_device *dev);
Eric Dumazet44a08732009-10-27 07:03:04 +00001741extern void unregister_netdevice_queue(struct net_device *dev,
1742 struct list_head *head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00001743extern void unregister_netdevice_many(struct list_head *head);
Eric Dumazet44a08732009-10-27 07:03:04 +00001744static inline void unregister_netdevice(struct net_device *dev)
1745{
1746 unregister_netdevice_queue(dev, NULL);
1747}
1748
Eric Dumazet29b44332010-10-11 10:22:12 +00001749extern int netdev_refcnt_read(const struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750extern void free_netdev(struct net_device *dev);
1751extern void synchronize_net(void);
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001752extern int init_dummy_netdev(struct net_device *dev);
1753
Eric W. Biederman881d9662007-09-17 11:56:21 -07001754extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1755extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00001756extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757extern int dev_restart(struct net_device *dev);
1758#ifdef CONFIG_NETPOLL_TRAP
1759extern int netpoll_trap(void);
1760#endif
Herbert Xu86911732009-01-29 14:19:50 +00001761extern int skb_gro_receive(struct sk_buff **head,
1762 struct sk_buff *skb);
1763
1764static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1765{
1766 return NAPI_GRO_CB(skb)->data_offset;
1767}
1768
1769static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1770{
1771 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1772}
1773
1774static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1775{
1776 NAPI_GRO_CB(skb)->data_offset += len;
1777}
1778
Herbert Xua5b1cf22009-05-26 18:50:28 +00001779static inline void *skb_gro_header_fast(struct sk_buff *skb,
1780 unsigned int offset)
Herbert Xu86911732009-01-29 14:19:50 +00001781{
Herbert Xu78a478d2009-05-26 18:50:21 +00001782 return NAPI_GRO_CB(skb)->frag0 + offset;
Herbert Xu86911732009-01-29 14:19:50 +00001783}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
Herbert Xua5b1cf22009-05-26 18:50:28 +00001785static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1786{
1787 return NAPI_GRO_CB(skb)->frag0_len < hlen;
1788}
1789
1790static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1791 unsigned int offset)
1792{
Herbert Xu17dd7592011-07-27 06:16:28 -07001793 if (!pskb_may_pull(skb, hlen))
1794 return NULL;
1795
Herbert Xua5b1cf22009-05-26 18:50:28 +00001796 NAPI_GRO_CB(skb)->frag0 = NULL;
1797 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu17dd7592011-07-27 06:16:28 -07001798 return skb->data + offset;
Herbert Xua5b1cf22009-05-26 18:50:28 +00001799}
1800
Herbert Xuaa4b9f52009-02-08 18:00:37 +00001801static inline void *skb_gro_mac_header(struct sk_buff *skb)
1802{
Herbert Xu78d3fd02009-05-26 18:50:23 +00001803 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
Herbert Xuaa4b9f52009-02-08 18:00:37 +00001804}
1805
Herbert Xu36e7b1b2009-04-27 05:44:45 -07001806static inline void *skb_gro_network_header(struct sk_buff *skb)
1807{
Herbert Xu78d3fd02009-05-26 18:50:23 +00001808 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1809 skb_network_offset(skb);
Herbert Xu36e7b1b2009-04-27 05:44:45 -07001810}
1811
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001812static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1813 unsigned short type,
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001814 const void *daddr, const void *saddr,
Eric Dumazet95c96172012-04-15 05:58:06 +00001815 unsigned int len)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001816{
Ursula Braunf1ecfd52007-10-22 16:16:14 +02001817 if (!dev->header_ops || !dev->header_ops->create)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001818 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001819
1820 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07001821}
1822
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001823static inline int dev_parse_header(const struct sk_buff *skb,
1824 unsigned char *haddr)
1825{
1826 const struct net_device *dev = skb->dev;
1827
Patrick McHardy1b833362007-10-18 05:09:28 -07001828 if (!dev->header_ops || !dev->header_ops->parse)
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001829 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001830 return dev->header_ops->parse(skb, haddr);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001831}
1832
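/*
 * Illustrative sketch (not part of this header): using dev_hard_header() to
 * prepend a link-layer header before transmitting a locally built frame.
 * ETH_P_FOO is a hypothetical ethertype and error handling is simplified.
 *
 *	static int foo_build_and_send(struct net_device *dev,
 *				      const u8 *dst, void *payload, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *		if (!skb)
 *			return -ENOMEM;
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));	// room for the hard header
 *		memcpy(skb_put(skb, len), payload, len);
 *		skb->dev = dev;
 *		skb->protocol = htons(ETH_P_FOO);
 *
 *		if (dev_hard_header(skb, dev, ETH_P_FOO, dst,
 *				    dev->dev_addr, skb->len) < 0) {
 *			kfree_skb(skb);
 *			return -EINVAL;
 *		}
 *		return dev_queue_xmit(skb);
 *	}
 */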
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1834extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1835static inline int unregister_gifconf(unsigned int family)
1836{
1837 return register_gifconf(family, NULL);
1838}
1839
Willem de Bruijn99bbc702013-05-20 04:02:32 +00001840#ifdef CONFIG_NET_FLOW_LIMIT
1841#define FLOW_LIMIT_HISTORY (1 << 8) /* must be ^2 */
1842struct sd_flow_limit {
1843 u64 count;
1844 unsigned int num_buckets;
1845 unsigned int history_head;
1846 u16 history[FLOW_LIMIT_HISTORY];
1847 u8 buckets[];
1848};
1849
1850extern int netdev_flow_limit_table_len;
1851#endif /* CONFIG_NET_FLOW_LIMIT */
1852
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853/*
Eric Dumazet88751272010-04-19 05:07:33 +00001854 * Incoming packets are placed on per-cpu queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 */
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001856struct softnet_data {
David S. Miller37437bb2008-07-16 02:15:04 -07001857 struct Qdisc *output_queue;
Changli Gaoa9cbd582010-04-26 23:06:24 +00001858 struct Qdisc **output_queue_tailp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 struct list_head poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 struct sk_buff *completion_queue;
Changli Gao6e7676c2010-04-27 15:07:33 -07001861 struct sk_buff_head process_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862
Changli Gaodee42872010-05-02 05:42:16 +00001863 /* stats */
David S. Millercd7b5392010-05-02 22:27:59 -07001864 unsigned int processed;
1865 unsigned int time_squeeze;
1866 unsigned int cpu_collision;
1867 unsigned int received_rps;
Changli Gaodee42872010-05-02 05:42:16 +00001868
Changli Gaofd793d82010-04-15 00:16:59 -07001869#ifdef CONFIG_RPS
Eric Dumazet88751272010-04-19 05:07:33 +00001870 struct softnet_data *rps_ipi_list;
1871
1872 /* Elements below can be accessed between CPUs for RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00001873 struct call_single_data csd ____cacheline_aligned_in_smp;
Eric Dumazet88751272010-04-19 05:07:33 +00001874 struct softnet_data *rps_ipi_next;
1875 unsigned int cpu;
Tom Herbertfec5e652010-04-16 16:01:27 -07001876 unsigned int input_queue_head;
Tom Herbert76cc8b12010-05-20 18:37:59 +00001877 unsigned int input_queue_tail;
Tom Herbert1e94d722010-03-18 17:45:44 -07001878#endif
Eric Dumazet95c96172012-04-15 05:58:06 +00001879 unsigned int dropped;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001880 struct sk_buff_head input_pkt_queue;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001881 struct napi_struct backlog;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00001882
1883#ifdef CONFIG_NET_FLOW_LIMIT
1884 struct sd_flow_limit *flow_limit;
1885#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886};
1887
Tom Herbert76cc8b12010-05-20 18:37:59 +00001888static inline void input_queue_head_incr(struct softnet_data *sd)
Tom Herbertfec5e652010-04-16 16:01:27 -07001889{
1890#ifdef CONFIG_RPS
Tom Herbert76cc8b12010-05-20 18:37:59 +00001891 sd->input_queue_head++;
1892#endif
1893}
1894
1895static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1896 unsigned int *qtail)
1897{
1898#ifdef CONFIG_RPS
1899 *qtail = ++sd->input_queue_tail;
Tom Herbertfec5e652010-04-16 16:01:27 -07001900#endif
1901}
1902
Tom Herbert0a9627f2010-03-16 08:03:29 +00001903DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904
David S. Miller37437bb2008-07-16 02:15:04 -07001905extern void __netif_schedule(struct Qdisc *q);
David S. Miller86d804e2008-07-08 23:11:25 -07001906
1907static inline void netif_schedule_queue(struct netdev_queue *txq)
1908{
Tom Herbert734664982011-11-28 16:32:44 +00001909 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
David S. Miller37437bb2008-07-16 02:15:04 -07001910 __netif_schedule(txq->qdisc);
David S. Miller86d804e2008-07-08 23:11:25 -07001911}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001913static inline void netif_tx_schedule_all(struct net_device *dev)
1914{
1915 unsigned int i;
1916
1917 for (i = 0; i < dev->num_tx_queues; i++)
1918 netif_schedule_queue(netdev_get_tx_queue(dev, i));
1919}
1920
Dave Jonesd29f7492008-07-22 14:09:06 -07001921static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1922{
Tom Herbert734664982011-11-28 16:32:44 +00001923 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07001924}
1925
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001926/**
1927 * netif_start_queue - allow transmit
1928 * @dev: network device
1929 *
1930 * Allow upper layers to call the device hard_start_xmit routine.
1931 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932static inline void netif_start_queue(struct net_device *dev)
1933{
David S. Millere8a04642008-07-17 00:34:19 -07001934 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935}
1936
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001937static inline void netif_tx_start_all_queues(struct net_device *dev)
1938{
1939 unsigned int i;
1940
1941 for (i = 0; i < dev->num_tx_queues; i++) {
1942 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1943 netif_tx_start_queue(txq);
1944 }
1945}
1946
David S. Miller79d16382008-07-08 23:14:46 -07001947static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948{
1949#ifdef CONFIG_NETPOLL_TRAP
Sergei Shtylyov5f286e12007-04-28 20:57:37 -07001950 if (netpoll_trap()) {
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00001951 netif_tx_start_queue(dev_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 return;
Sergei Shtylyov5f286e12007-04-28 20:57:37 -07001953 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954#endif
Tom Herbert734664982011-11-28 16:32:44 +00001955 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
David S. Miller37437bb2008-07-16 02:15:04 -07001956 __netif_schedule(dev_queue->qdisc);
David S. Miller79d16382008-07-08 23:14:46 -07001957}
1958
Dave Jonesd29f7492008-07-22 14:09:06 -07001959/**
1960 * netif_wake_queue - restart transmit
1961 * @dev: network device
1962 *
1963 * Allow upper layers to call the device hard_start_xmit routine.
1964 * Used for flow control when transmit resources are available.
1965 */
David S. Miller79d16382008-07-08 23:14:46 -07001966static inline void netif_wake_queue(struct net_device *dev)
1967{
David S. Millere8a04642008-07-17 00:34:19 -07001968 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969}
1970
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001971static inline void netif_tx_wake_all_queues(struct net_device *dev)
1972{
1973 unsigned int i;
1974
1975 for (i = 0; i < dev->num_tx_queues; i++) {
1976 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1977 netif_tx_wake_queue(txq);
1978 }
1979}
1980
Dave Jonesd29f7492008-07-22 14:09:06 -07001981static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1982{
Guillaume Chazarain18543a62010-11-06 06:39:32 +00001983 if (WARN_ON(!dev_queue)) {
Joe Perches256ee432011-03-01 07:06:12 +00001984 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
Guillaume Chazarain18543a62010-11-06 06:39:32 +00001985 return;
1986 }
Tom Herbert734664982011-11-28 16:32:44 +00001987 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07001988}
1989
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001990/**
 1991 * netif_stop_queue - stop the transmit queue
1992 * @dev: network device
1993 *
1994 * Stop upper layers calling the device hard_start_xmit routine.
1995 * Used for flow control when transmit resources are unavailable.
1996 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997static inline void netif_stop_queue(struct net_device *dev)
1998{
David S. Millere8a04642008-07-17 00:34:19 -07001999 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000}
2001
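/*
 * Illustrative sketch (not part of this header): the classic stop/wake flow
 * control pattern in a single-queue driver.  foo_tx_ring_full(),
 * foo_tx_ring_has_room() and the priv layout are hypothetical.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		// ... post skb to the hardware ring ...
 *		if (foo_tx_ring_full(priv))
 *			netif_stop_queue(dev);		// no room for another frame
 *		return NETDEV_TX_OK;
 *	}
 *
 *	static void foo_tx_clean(struct foo_priv *priv)
 *	{
 *		// ... reclaim completed descriptors ...
 *		if (netif_queue_stopped(priv->dev) && foo_tx_ring_has_room(priv))
 *			netif_wake_queue(priv->dev);	// restart the stack's transmits
 *	}
 */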
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002002static inline void netif_tx_stop_all_queues(struct net_device *dev)
2003{
2004 unsigned int i;
2005
2006 for (i = 0; i < dev->num_tx_queues; i++) {
2007 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2008 netif_tx_stop_queue(txq);
2009 }
2010}
2011
David S. Miller4d295152012-03-07 21:02:35 -05002012static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
Dave Jonesd29f7492008-07-22 14:09:06 -07002013{
Tom Herbert734664982011-11-28 16:32:44 +00002014 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07002015}
2016
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002017/**
2018 * netif_queue_stopped - test if transmit queue is flowblocked
2019 * @dev: network device
2020 *
2021 * Test if transmit queue on device is currently unable to send.
2022 */
David S. Miller4d295152012-03-07 21:02:35 -05002023static inline bool netif_queue_stopped(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024{
David S. Millere8a04642008-07-17 00:34:19 -07002025 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026}
2027
David S. Miller4d295152012-03-07 21:02:35 -05002028static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
David S. Millerc3f26a22008-07-31 16:58:50 -07002029{
Tom Herbert734664982011-11-28 16:32:44 +00002030 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2031}
2032
David S. Miller4d295152012-03-07 21:02:35 -05002033static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
Tom Herbert734664982011-11-28 16:32:44 +00002034{
2035 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2036}
2037
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002038static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2039 unsigned int bytes)
2040{
Tom Herbert114cf582011-11-28 16:33:09 +00002041#ifdef CONFIG_BQL
2042 dql_queued(&dev_queue->dql, bytes);
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00002043
2044 if (likely(dql_avail(&dev_queue->dql) >= 0))
2045 return;
2046
2047 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2048
2049 /*
2050 * The XOFF flag must be set before checking the dql_avail below,
2051 * because in netdev_tx_completed_queue we update the dql_completed
2052 * before checking the XOFF flag.
2053 */
2054 smp_mb();
2055
2056 /* check again in case another CPU has just made room avail */
2057 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2058 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
Tom Herbert114cf582011-11-28 16:33:09 +00002059#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002060}
2061
2062static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2063{
2064 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2065}
2066
2067static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
Eric Dumazet95c96172012-04-15 05:58:06 +00002068 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002069{
Tom Herbert114cf582011-11-28 16:33:09 +00002070#ifdef CONFIG_BQL
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00002071 if (unlikely(!bytes))
2072 return;
2073
2074 dql_completed(&dev_queue->dql, bytes);
2075
2076 /*
 2077	 * Without the memory barrier there is a small possibility that
2078 * netdev_tx_sent_queue will miss the update and cause the queue to
2079 * be stopped forever
2080 */
2081 smp_mb();
2082
2083 if (dql_avail(&dev_queue->dql) < 0)
2084 return;
2085
2086 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
2087 netif_schedule_queue(dev_queue);
Tom Herbert114cf582011-11-28 16:33:09 +00002088#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002089}
2090
2091static inline void netdev_completed_queue(struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +00002092 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002093{
2094 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
2095}
2096
2097static inline void netdev_tx_reset_queue(struct netdev_queue *q)
2098{
Tom Herbert114cf582011-11-28 16:33:09 +00002099#ifdef CONFIG_BQL
Alexander Duyck5c490352012-02-07 02:29:01 +00002100 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
Tom Herbert114cf582011-11-28 16:33:09 +00002101 dql_reset(&q->dql);
2102#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00002103}
2104
2105static inline void netdev_reset_queue(struct net_device *dev_queue)
2106{
2107 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
David S. Millerc3f26a22008-07-31 16:58:50 -07002108}
2109
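/*
 * Illustrative sketch (not part of this header): how a driver feeds byte
 * queue limits.  Bytes are accounted in ndo_start_xmit() and released from
 * the TX completion path; the queue is reset when the ring is torn down.
 * The foo_* names are hypothetical.
 *
 *	static void foo_tx_complete(struct foo_priv *priv)
 *	{
 *		unsigned int pkts = 0, bytes = 0;
 *
 *		// ... walk finished descriptors, sum pkts/bytes, free skbs ...
 *		netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, 0),
 *					  pkts, bytes);
 *	}
 *
 *	// in ndo_start_xmit(), after posting the frame:
 *	//	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), skb->len);
 *	// when tearing the ring down (ndo_stop path):
 *	//	netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
 */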
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002110/**
2111 * netif_running - test if up
2112 * @dev: network device
2113 *
2114 * Test if the device has been brought up.
2115 */
David S. Miller4d295152012-03-07 21:02:35 -05002116static inline bool netif_running(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117{
2118 return test_bit(__LINK_STATE_START, &dev->state);
2119}
2120
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002121/*
2122 * Routines to manage the subqueues on a device. We only need start
2123 * stop, and a check if it's stopped. All other device management is
2124 * done at the overall netdevice level.
2125 * Also test the device if we're multiqueue.
2126 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002127
2128/**
2129 * netif_start_subqueue - allow sending packets on subqueue
2130 * @dev: network device
2131 * @queue_index: sub queue index
2132 *
2133 * Start individual transmit queue of a device with multiple transmit queues.
2134 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002135static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2136{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002137 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002138
2139 netif_tx_start_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002140}
2141
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002142/**
2143 * netif_stop_subqueue - stop sending packets on subqueue
2144 * @dev: network device
2145 * @queue_index: sub queue index
2146 *
2147 * Stop individual transmit queue of a device with multiple transmit queues.
2148 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002149static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2150{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002151 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002152#ifdef CONFIG_NETPOLL_TRAP
2153 if (netpoll_trap())
2154 return;
2155#endif
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002156 netif_tx_stop_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002157}
2158
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002159/**
2160 * netif_subqueue_stopped - test status of subqueue
2161 * @dev: network device
2162 * @queue_index: sub queue index
2163 *
2164 * Check individual transmit queue of a device with multiple transmit queues.
2165 */
David S. Miller4d295152012-03-07 21:02:35 -05002166static inline bool __netif_subqueue_stopped(const struct net_device *dev,
2167 u16 queue_index)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002168{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002169 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002170
2171 return netif_tx_queue_stopped(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002172}
2173
David S. Miller4d295152012-03-07 21:02:35 -05002174static inline bool netif_subqueue_stopped(const struct net_device *dev,
2175 struct sk_buff *skb)
Pavel Emelyanov668f8952007-10-21 17:01:56 -07002176{
2177 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2178}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002179
2180/**
2181 * netif_wake_subqueue - allow sending packets on subqueue
2182 * @dev: network device
2183 * @queue_index: sub queue index
2184 *
2185 * Resume individual transmit queue of a device with multiple transmit queues.
2186 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002187static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2188{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002189 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002190#ifdef CONFIG_NETPOLL_TRAP
2191 if (netpoll_trap())
2192 return;
2193#endif
Tom Herbert734664982011-11-28 16:32:44 +00002194 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
David S. Miller37437bb2008-07-16 02:15:04 -07002195 __netif_schedule(txq->qdisc);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002196}
2197
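/*
 * Illustrative sketch (not part of this header): per-queue flow control in a
 * hypothetical multiqueue driver, using the subqueue variants keyed by the
 * skb's queue mapping.  foo_ring_full() and foo_ring_has_room() are made up.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		u16 q = skb_get_queue_mapping(skb);
 *
 *		// ... post skb to TX ring q ...
 *		if (foo_ring_full(dev, q))
 *			netif_stop_subqueue(dev, q);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	// in the per-ring completion handler:
 *	//	if (__netif_subqueue_stopped(dev, q) && foo_ring_has_room(dev, q))
 *	//		netif_wake_subqueue(dev, q);
 */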
Alexander Duyck537c00d2013-01-10 08:57:02 +00002198#ifdef CONFIG_XPS
Alexander Duyck537c00d2013-01-10 08:57:02 +00002199extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
2200 u16 index);
2201#else
2202static inline int netif_set_xps_queue(struct net_device *dev,
2203 struct cpumask *mask,
2204 u16 index)
2205{
2206 return 0;
2207}
2208#endif
2209
Vladislav Zolotarova3d22a62010-12-13 06:27:10 +00002210/*
2211 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2212 * as a distribution range limit for the returned value.
2213 */
2214static inline u16 skb_tx_hash(const struct net_device *dev,
2215 const struct sk_buff *skb)
2216{
2217 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2218}
2219
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002220/**
2221 * netif_is_multiqueue - test if device has multiple transmit queues
2222 * @dev: network device
2223 *
2224 * Check if device has multiple transmit queues
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002225 */
David S. Miller4d295152012-03-07 21:02:35 -05002226static inline bool netif_is_multiqueue(const struct net_device *dev)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002227{
Eric Dumazeta02cec22010-09-22 20:43:57 +00002228 return dev->num_tx_queues > 1;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002229}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230
Tom Herberte6484932010-10-18 18:04:39 +00002231extern int netif_set_real_num_tx_queues(struct net_device *dev,
2232 unsigned int txq);
John Fastabendf0796d52010-07-01 13:21:57 +00002233
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002234#ifdef CONFIG_RPS
2235extern int netif_set_real_num_rx_queues(struct net_device *dev,
2236 unsigned int rxq);
2237#else
2238static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2239 unsigned int rxq)
2240{
2241 return 0;
2242}
2243#endif
2244
Ben Hutchings3171d022010-09-27 08:24:49 +00002245static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2246 const struct net_device *from_dev)
2247{
Jiri Pirkoee6ae1a2012-07-20 02:28:46 +00002248 int err;
2249
2250 err = netif_set_real_num_tx_queues(to_dev,
2251 from_dev->real_num_tx_queues);
2252 if (err)
2253 return err;
Ben Hutchings3171d022010-09-27 08:24:49 +00002254#ifdef CONFIG_RPS
2255 return netif_set_real_num_rx_queues(to_dev,
2256 from_dev->real_num_rx_queues);
2257#else
2258 return 0;
2259#endif
2260}
2261
Yuval Mintz16917b82012-07-01 03:18:50 +00002262#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2263extern int netif_get_num_default_rss_queues(void);
2264
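/*
 * Illustrative sketch (not part of this header): a driver that allocates the
 * maximum number of queues at alloc time and then trims the active count to
 * what the hardware actually enabled.  FOO_MAX_QUEUES and hw_channels are
 * assumptions.
 *
 *	// at probe time, with hw_channels read back from the device:
 *	dev = alloc_etherdev_mq(sizeof(*priv), FOO_MAX_QUEUES);
 *	// ...
 *	enabled = min_t(unsigned int, hw_channels,
 *			netif_get_num_default_rss_queues());
 *	err = netif_set_real_num_tx_queues(dev, enabled);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, enabled);
 */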
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265/* Use this variant when it is known for sure that it
Matti Linnanvuori0ef47302008-03-28 16:33:00 -07002266 * is executing from hardware interrupt context or with hardware interrupts
2267 * disabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002269extern void dev_kfree_skb_irq(struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270
2271/* Use this variant in places where it could be invoked
Matti Linnanvuori0ef47302008-03-28 16:33:00 -07002272 * from either hardware interrupt or other context, with hardware interrupts
2273 * either disabled or enabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002275extern void dev_kfree_skb_any(struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277extern int netif_rx(struct sk_buff *skb);
2278extern int netif_rx_ni(struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279extern int netif_receive_skb(struct sk_buff *skb);
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002280extern gro_result_t napi_gro_receive(struct napi_struct *napi,
Herbert Xud565b0a2008-12-15 23:38:52 -08002281 struct sk_buff *skb);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00002282extern void napi_gro_flush(struct napi_struct *napi, bool flush_old);
Herbert Xu76620aa2009-04-16 02:02:07 -07002283extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002284extern gro_result_t napi_gro_frags(struct napi_struct *napi);
Herbert Xu76620aa2009-04-16 02:02:07 -07002285
2286static inline void napi_free_frags(struct napi_struct *napi)
2287{
2288 kfree_skb(napi->skb);
2289 napi->skb = NULL;
2290}
2291
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00002292extern int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00002293 rx_handler_func_t *rx_handler,
2294 void *rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00002295extern void netdev_rx_handler_unregister(struct net_device *dev);
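/* Sketch only (callback name assumed): an upper device such as a bridge or
 * bonding driver claims a port under RTNL roughly like this:
 *
 *	err = netdev_rx_handler_register(port_dev, my_rx_handler, port_priv);
 *	...
 *	netdev_rx_handler_unregister(port_dev);
 *
 * where my_rx_handler is a rx_handler_func_t returning RX_HANDLER_CONSUMED,
 * RX_HANDLER_PASS, RX_HANDLER_ANOTHER or RX_HANDLER_EXACT.
 */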
2296
David S. Miller95f050b2012-03-06 16:12:15 -05002297extern bool dev_valid_name(const char *name);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002298extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2299extern int dev_ethtool(struct net *net, struct ifreq *);
Eric Dumazet95c96172012-04-15 05:58:06 +00002300extern unsigned int dev_get_flags(const struct net_device *);
Patrick McHardybd380812010-02-26 06:34:53 +00002301extern int __dev_change_flags(struct net_device *, unsigned int flags);
Eric Dumazet95c96172012-04-15 05:58:06 +00002302extern int dev_change_flags(struct net_device *, unsigned int);
Patrick McHardybd380812010-02-26 06:34:53 +00002303extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07002304extern int dev_change_name(struct net_device *, const char *);
Stephen Hemminger0b815a12008-09-22 21:28:11 -07002305extern int dev_set_alias(struct net_device *, const char *, size_t);
Eric W. Biedermance286d32007-09-12 13:53:49 +02002306extern int dev_change_net_namespace(struct net_device *,
2307 struct net *, const char *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308extern int dev_set_mtu(struct net_device *, int);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00002309extern void dev_set_group(struct net_device *, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310extern int dev_set_mac_address(struct net_device *,
2311 struct sockaddr *);
Jiri Pirko4bf84c32012-12-27 23:49:37 +00002312extern int dev_change_carrier(struct net_device *,
2313 bool new_carrier);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002314extern int dev_hard_start_xmit(struct sk_buff *skb,
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002315 struct net_device *dev,
2316 struct netdev_queue *txq);
Arnd Bergmann44540962009-11-26 06:07:08 +00002317extern int dev_forward_skb(struct net_device *dev,
2318 struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03002320extern int netdev_budget;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
2322/* Called by rtnetlink.c:rtnl_unlock() */
2323extern void netdev_run_todo(void);
2324
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002325/**
2326 * dev_put - release reference to device
2327 * @dev: network device
2328 *
Benjamin Thery9ef44292007-10-10 21:18:17 -07002329 * Release reference to device to allow it to be freed.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002330 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331static inline void dev_put(struct net_device *dev)
2332{
Christoph Lameter933393f2011-12-22 11:58:51 -06002333 this_cpu_dec(*dev->pcpu_refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334}
2335
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002336/**
2337 * dev_hold - get reference to device
2338 * @dev: network device
2339 *
Benjamin Thery9ef44292007-10-10 21:18:17 -07002340 * Hold reference to device to keep it from being freed.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002341 */
Stephen Hemminger15333062006-03-20 22:32:28 -08002342static inline void dev_hold(struct net_device *dev)
2343{
Christoph Lameter933393f2011-12-22 11:58:51 -06002344 this_cpu_inc(*dev->pcpu_refcnt);
Stephen Hemminger15333062006-03-20 22:32:28 -08002345}
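/* Illustrative only: code handing the device off to asynchronous work can pin
 * it for the duration, dropping the reference when the work completes:
 *
 *	dev_hold(dev);
 *	schedule_work(&priv->work);	(assumed work item)
 *	...
 *	dev_put(dev);			(from the work handler)
 */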
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346
2347/* Carrier loss detection, dial on demand. The functions netif_carrier_on
2348 * and _off may be called from IRQ context, but it is the caller
2349 * who is responsible for serialization of these calls.
Stefan Rompfb00055a2006-03-20 17:09:11 -08002350 *
2351 * The name 'carrier' is inappropriate; these functions should really be
2352 * called netif_lowerlayer_*() because they represent the state of any
2353 * kind of lower layer not just hardware media.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 */
2355
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01002356extern void linkwatch_init_dev(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357extern void linkwatch_fire_event(struct net_device *dev);
Eric Dumazete014deb2009-11-17 05:59:21 +00002358extern void linkwatch_forget_dev(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002360/**
2361 * netif_carrier_ok - test if carrier present
2362 * @dev: network device
2363 *
2364 * Check if carrier is present on device
2365 */
David S. Miller4d295152012-03-07 21:02:35 -05002366static inline bool netif_carrier_ok(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367{
2368 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
2369}
2370
Eric Dumazet9d214932009-05-17 20:55:16 -07002371extern unsigned long dev_trans_start(struct net_device *dev);
2372
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373extern void __netdev_watchdog_up(struct net_device *dev);
2374
Denis Vlasenko0a242ef2005-08-11 15:32:53 -07002375extern void netif_carrier_on(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376
Denis Vlasenko0a242ef2005-08-11 15:32:53 -07002377extern void netif_carrier_off(struct net_device *dev);
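/* Sketch (assumed link-status callback): drivers typically report link
 * changes from their PHY or interrupt handler:
 *
 *	if (foo_link_is_up(priv))		(hypothetical helper)
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 *
 * netif_carrier_ok(dev) can then be tested before queueing transmits.
 */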
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002379/**
2380 * netif_dormant_on - mark device as dormant.
2381 * @dev: network device
2382 *
2383 * Mark device as dormant (as per RFC2863).
2384 *
2385 * The dormant state indicates that the relevant interface is not
2386 * actually in a condition to pass packets (i.e., it is not 'up') but is
2387 * in a "pending" state, waiting for some external event. For "on-
2388 * demand" interfaces, this new state identifies the situation where the
2389 * interface is waiting for events to place it in the up state.
2390 *
2391 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08002392static inline void netif_dormant_on(struct net_device *dev)
2393{
2394 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
2395 linkwatch_fire_event(dev);
2396}
2397
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002398/**
2399 * netif_dormant_off - set device as not dormant.
2400 * @dev: network device
2401 *
2402 * Device is not in dormant state.
2403 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08002404static inline void netif_dormant_off(struct net_device *dev)
2405{
2406 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
2407 linkwatch_fire_event(dev);
2408}
2409
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002410/**
2411 * netif_dormant - test if device is dormant
2412 * @dev: network device
2413 *
2414 * Check if the device is dormant (as per RFC2863).
2415 */
David S. Miller4d295152012-03-07 21:02:35 -05002416static inline bool netif_dormant(const struct net_device *dev)
Stefan Rompfb00055a2006-03-20 17:09:11 -08002417{
2418 return test_bit(__LINK_STATE_DORMANT, &dev->state);
2419}
2420
2421
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002422/**
2423 * netif_oper_up - test if device is operational
2424 * @dev: network device
2425 *
2426 * Check if the device is operational.
2427 */
David S. Miller4d295152012-03-07 21:02:35 -05002428static inline bool netif_oper_up(const struct net_device *dev)
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08002429{
Stefan Rompfb00055a2006-03-20 17:09:11 -08002430 return (dev->operstate == IF_OPER_UP ||
2431 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
2432}
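/* Sketch (assumed 802.1X-style flow): an "on-demand" driver might mark the
 * interface dormant while waiting for authentication and clear it afterwards:
 *
 *	netif_dormant_on(dev);			(waiting for external event)
 *	...authentication completes...
 *	netif_dormant_off(dev);
 *	if (netif_oper_up(dev))
 *		foo_start_traffic(dev);		(hypothetical helper)
 */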
2433
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002434/**
2435 * netif_device_present - is device available or removed
2436 * @dev: network device
2437 *
2438 * Check if the device has not been removed from the system.
2439 */
David S. Miller4d295152012-03-07 21:02:35 -05002440static inline bool netif_device_present(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441{
2442 return test_bit(__LINK_STATE_PRESENT, &dev->state);
2443}
2444
Denis Vlasenko56079432006-03-29 15:57:29 -08002445extern void netif_device_detach(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446
Denis Vlasenko56079432006-03-29 15:57:29 -08002447extern void netif_device_attach(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448
2449/*
2450 * Network interface message level settings
2451 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452
2453enum {
2454 NETIF_MSG_DRV = 0x0001,
2455 NETIF_MSG_PROBE = 0x0002,
2456 NETIF_MSG_LINK = 0x0004,
2457 NETIF_MSG_TIMER = 0x0008,
2458 NETIF_MSG_IFDOWN = 0x0010,
2459 NETIF_MSG_IFUP = 0x0020,
2460 NETIF_MSG_RX_ERR = 0x0040,
2461 NETIF_MSG_TX_ERR = 0x0080,
2462 NETIF_MSG_TX_QUEUED = 0x0100,
2463 NETIF_MSG_INTR = 0x0200,
2464 NETIF_MSG_TX_DONE = 0x0400,
2465 NETIF_MSG_RX_STATUS = 0x0800,
2466 NETIF_MSG_PKTDATA = 0x1000,
2467 NETIF_MSG_HW = 0x2000,
2468 NETIF_MSG_WOL = 0x4000,
2469};
2470
2471#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
2472#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
2473#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
2474#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
2475#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
2476#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
2477#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
2478#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
2479#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
2480#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
2481#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
2482#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
2483#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
2484#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
2485#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
2486
2487static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2488{
2489 /* use default */
2490 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2491 return default_msg_enable_bits;
2492 if (debug_value == 0) /* no output */
2493 return 0;
2494 /* set low N bits */
2495 return (1 << debug_value) - 1;
2496}
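/* Illustrative sketch: drivers usually feed a module parameter into
 * netif_msg_init() ("debug" and the default bits are assumptions):
 *
 *	static int debug = -1;			(-1 selects the defaults below)
 *	module_param(debug, int, 0644);
 *	...
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *					  NETIF_MSG_LINK);
 */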
2497
David S. Millerc773e842008-07-08 23:13:53 -07002498static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
Herbert Xu932ff272006-06-09 12:20:56 -07002499{
David S. Millerc773e842008-07-08 23:13:53 -07002500 spin_lock(&txq->_xmit_lock);
2501 txq->xmit_lock_owner = cpu;
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07002502}
2503
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002504static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
2505{
2506 spin_lock_bh(&txq->_xmit_lock);
2507 txq->xmit_lock_owner = smp_processor_id();
2508}
2509
David S. Miller4d295152012-03-07 21:02:35 -05002510static inline bool __netif_tx_trylock(struct netdev_queue *txq)
David S. Millerc773e842008-07-08 23:13:53 -07002511{
David S. Miller4d295152012-03-07 21:02:35 -05002512 bool ok = spin_trylock(&txq->_xmit_lock);
David S. Millerc773e842008-07-08 23:13:53 -07002513 if (likely(ok))
2514 txq->xmit_lock_owner = smp_processor_id();
2515 return ok;
Herbert Xu932ff272006-06-09 12:20:56 -07002516}
2517
David S. Millerc773e842008-07-08 23:13:53 -07002518static inline void __netif_tx_unlock(struct netdev_queue *txq)
2519{
2520 txq->xmit_lock_owner = -1;
2521 spin_unlock(&txq->_xmit_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07002522}
2523
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002524static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
2525{
2526 txq->xmit_lock_owner = -1;
2527 spin_unlock_bh(&txq->_xmit_lock);
2528}
2529
Eric Dumazet08baf562009-05-25 22:58:01 -07002530static inline void txq_trans_update(struct netdev_queue *txq)
2531{
2532 if (txq->xmit_lock_owner != -1)
2533 txq->trans_start = jiffies;
2534}
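/* Sketch only: a driver touching a single TX ring from process context would
 * typically take the per-queue lock with BH disabled ("ring" is assumed):
 *
 *	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring);
 *
 *	__netif_tx_lock_bh(txq);
 *	...reclaim completed descriptors, restart the queue if needed...
 *	__netif_tx_unlock_bh(txq);
 */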
2535
David S. Millerc3f26a22008-07-31 16:58:50 -07002536/**
2537 * netif_tx_lock - grab network device transmit lock
2538 * @dev: network device
David S. Millerc3f26a22008-07-31 16:58:50 -07002539 *
2540 * Get network device transmit lock
2541 */
2542static inline void netif_tx_lock(struct net_device *dev)
2543{
2544 unsigned int i;
2545 int cpu;
2546
2547 spin_lock(&dev->tx_global_lock);
2548 cpu = smp_processor_id();
2549 for (i = 0; i < dev->num_tx_queues; i++) {
2550 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2551
2552 /* We are the only thread of execution doing a
2553 * freeze, but we have to grab the _xmit_lock in
2554 * order to synchronize with threads which are in
2555 * the ->hard_start_xmit() handler and already
2556 * checked the frozen bit.
2557 */
2558 __netif_tx_lock(txq, cpu);
2559 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
2560 __netif_tx_unlock(txq);
2561 }
2562}
2563
2564static inline void netif_tx_lock_bh(struct net_device *dev)
2565{
2566 local_bh_disable();
2567 netif_tx_lock(dev);
2568}
2569
Herbert Xu932ff272006-06-09 12:20:56 -07002570static inline void netif_tx_unlock(struct net_device *dev)
2571{
David S. Millere8a04642008-07-17 00:34:19 -07002572 unsigned int i;
David S. Millerc773e842008-07-08 23:13:53 -07002573
David S. Millere8a04642008-07-17 00:34:19 -07002574 for (i = 0; i < dev->num_tx_queues; i++) {
2575 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
David S. Millere8a04642008-07-17 00:34:19 -07002576
David S. Millerc3f26a22008-07-31 16:58:50 -07002577 /* No need to grab the _xmit_lock here. If the
2578 * queue is not stopped for another reason, we
2579 * force a schedule.
2580 */
2581 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00002582 netif_schedule_queue(txq);
David S. Millerc3f26a22008-07-31 16:58:50 -07002583 }
2584 spin_unlock(&dev->tx_global_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07002585}
2586
2587static inline void netif_tx_unlock_bh(struct net_device *dev)
2588{
David S. Millere8a04642008-07-17 00:34:19 -07002589 netif_tx_unlock(dev);
2590 local_bh_enable();
Herbert Xu932ff272006-06-09 12:20:56 -07002591}
2592
David S. Millerc773e842008-07-08 23:13:53 -07002593#define HARD_TX_LOCK(dev, txq, cpu) { \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07002594 if ((dev->features & NETIF_F_LLTX) == 0) { \
David S. Millerc773e842008-07-08 23:13:53 -07002595 __netif_tx_lock(txq, cpu); \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07002596 } \
2597}
2598
David S. Millerc773e842008-07-08 23:13:53 -07002599#define HARD_TX_UNLOCK(dev, txq) { \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07002600 if ((dev->features & NETIF_F_LLTX) == 0) { \
David S. Millerc773e842008-07-08 23:13:53 -07002601 __netif_tx_unlock(txq); \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07002602 } \
2603}
2604
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605static inline void netif_tx_disable(struct net_device *dev)
2606{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002607 unsigned int i;
David S. Millerc3f26a22008-07-31 16:58:50 -07002608 int cpu;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002609
David S. Millerc3f26a22008-07-31 16:58:50 -07002610 local_bh_disable();
2611 cpu = smp_processor_id();
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002612 for (i = 0; i < dev->num_tx_queues; i++) {
2613 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
David S. Millerc3f26a22008-07-31 16:58:50 -07002614
2615 __netif_tx_lock(txq, cpu);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002616 netif_tx_stop_queue(txq);
David S. Millerc3f26a22008-07-31 16:58:50 -07002617 __netif_tx_unlock(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002618 }
David S. Millerc3f26a22008-07-31 16:58:50 -07002619 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620}
2621
David S. Millere308a5d2008-07-15 00:13:44 -07002622static inline void netif_addr_lock(struct net_device *dev)
2623{
2624 spin_lock(&dev->addr_list_lock);
2625}
2626
Jiri Pirko2429f7a2012-01-09 06:36:54 +00002627static inline void netif_addr_lock_nested(struct net_device *dev)
2628{
2629 spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
2630}
2631
David S. Millere308a5d2008-07-15 00:13:44 -07002632static inline void netif_addr_lock_bh(struct net_device *dev)
2633{
2634 spin_lock_bh(&dev->addr_list_lock);
2635}
2636
2637static inline void netif_addr_unlock(struct net_device *dev)
2638{
2639 spin_unlock(&dev->addr_list_lock);
2640}
2641
2642static inline void netif_addr_unlock_bh(struct net_device *dev)
2643{
2644 spin_unlock_bh(&dev->addr_list_lock);
2645}
2646
Jiri Pirkof001fde2009-05-05 02:48:28 +00002647/*
Jiri Pirko31278e72009-06-17 01:12:19 +00002648 * dev_addrs walker. Should be used only for read access. Call with
Jiri Pirkof001fde2009-05-05 02:48:28 +00002649 * rcu_read_lock held.
2650 */
2651#define for_each_dev_addr(dev, ha) \
Jiri Pirko31278e72009-06-17 01:12:19 +00002652 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
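/* Sketch: walking the address list under RCU (illustrative only):
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		pr_info("addr %pM type %d\n", ha->addr, ha->type);
 *	rcu_read_unlock();
 */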
Jiri Pirkof001fde2009-05-05 02:48:28 +00002653
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654/* These functions live elsewhere (drivers/net/net_init.c, but related) */
2655
2656extern void ether_setup(struct net_device *dev);
2657
2658/* Support for loadable net-drivers */
Tom Herbert36909ea2011-01-09 19:36:31 +00002659extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002660 void (*setup)(struct net_device *),
Tom Herbert36909ea2011-01-09 19:36:31 +00002661 unsigned int txqs, unsigned int rxqs);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002662#define alloc_netdev(sizeof_priv, name, setup) \
Tom Herbert36909ea2011-01-09 19:36:31 +00002663 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2664
2665#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2666 alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2667
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668extern int register_netdev(struct net_device *dev);
2669extern void unregister_netdev(struct net_device *dev);
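/* Illustrative probe/remove sequence (error handling trimmed, names assumed):
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", foo_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 *	...
 *	unregister_netdev(dev);		(on removal, then free_netdev(dev))
 */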
Jiri Pirkof001fde2009-05-05 02:48:28 +00002670
Jiri Pirko22bedad32010-04-01 21:22:57 +00002671/* General hardware address lists handling functions */
2672extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2673 struct netdev_hw_addr_list *from_list,
2674 int addr_len, unsigned char addr_type);
2675extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2676 struct netdev_hw_addr_list *from_list,
2677 int addr_len, unsigned char addr_type);
2678extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2679 struct netdev_hw_addr_list *from_list,
2680 int addr_len);
2681extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2682 struct netdev_hw_addr_list *from_list,
2683 int addr_len);
2684extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2685extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2686
Jiri Pirkof001fde2009-05-05 02:48:28 +00002687/* Functions used for device addresses handling */
stephen hemminger6b6e2722012-09-17 10:03:26 +00002688extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
Jiri Pirkof001fde2009-05-05 02:48:28 +00002689 unsigned char addr_type);
stephen hemminger6b6e2722012-09-17 10:03:26 +00002690extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
Jiri Pirkof001fde2009-05-05 02:48:28 +00002691 unsigned char addr_type);
2692extern int dev_addr_add_multiple(struct net_device *to_dev,
2693 struct net_device *from_dev,
2694 unsigned char addr_type);
2695extern int dev_addr_del_multiple(struct net_device *to_dev,
2696 struct net_device *from_dev,
2697 unsigned char addr_type);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002698extern void dev_addr_flush(struct net_device *dev);
2699extern int dev_addr_init(struct net_device *dev);
2700
2701/* Functions used for unicast addresses handling */
stephen hemminger6b6e2722012-09-17 10:03:26 +00002702extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
2703extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
2704extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002705extern int dev_uc_sync(struct net_device *to, struct net_device *from);
Vlad Yasevich4cd729b02013-04-15 09:54:25 +00002706extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002707extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2708extern void dev_uc_flush(struct net_device *dev);
2709extern void dev_uc_init(struct net_device *dev);
Jiri Pirkof001fde2009-05-05 02:48:28 +00002710
Jiri Pirko22bedad32010-04-01 21:22:57 +00002711/* Functions used for multicast addresses handling */
stephen hemminger6b6e2722012-09-17 10:03:26 +00002712extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
2713extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
2714extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
2715extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
2716extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
Jiri Pirko22bedad32010-04-01 21:22:57 +00002717extern int dev_mc_sync(struct net_device *to, struct net_device *from);
Vlad Yasevich4cd729b02013-04-15 09:54:25 +00002718extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
Jiri Pirko22bedad32010-04-01 21:22:57 +00002719extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2720extern void dev_mc_flush(struct net_device *dev);
2721extern void dev_mc_init(struct net_device *dev);
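/* Sketch (illustrative): a stacked device propagating its address lists to
 * the lower device from its ndo_set_rx_mode() callback might do:
 *
 *	dev_uc_sync(lower_dev, dev);
 *	dev_mc_sync(lower_dev, dev);
 *
 * and undo it at shutdown with dev_uc_unsync()/dev_mc_unsync().
 */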
Herbert Xufb286bb2005-11-10 13:01:24 -08002722
2723/* Functions used for secondary unicast and multicast support */
2724extern void dev_set_rx_mode(struct net_device *dev);
2725extern void __dev_set_rx_mode(struct net_device *dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08002726extern int dev_set_promiscuity(struct net_device *dev, int inc);
2727extern int dev_set_allmulti(struct net_device *dev, int inc);
2728extern void netdev_state_change(struct net_device *dev);
Amerigo Wangee89bab2012-08-09 22:14:56 +00002729extern void netdev_notify_peers(struct net_device *dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08002730extern void netdev_features_change(struct net_device *dev);
2731/* Load a device via the kmod */
2732extern void dev_load(struct net *net, const char *name);
Ben Hutchingsd7753512010-07-09 09:12:41 +00002733extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2734 struct rtnl_link_stats64 *storage);
Eric Dumazet77a1abf2012-03-05 04:50:09 +00002735extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
2736 const struct net_device_stats *netdev_stats);
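/* Sketch (assumed function name): a driver that only maintains the legacy
 * counters in dev->stats can implement ndo_get_stats64() as:
 *
 *	static struct rtnl_link_stats64 *
 *	foo_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 *	{
 *		netdev_stats_to_stats64(storage, &dev->stats);
 *		return storage;
 *	}
 */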
Herbert Xufb286bb2005-11-10 13:01:24 -08002737
2738extern int netdev_max_backlog;
Eric Dumazet3b098e22010-05-15 23:57:10 -07002739extern int netdev_tstamp_prequeue;
Herbert Xufb286bb2005-11-10 13:01:24 -08002740extern int weight_p;
Eric Dumazet0a148422011-04-20 09:27:32 +00002741extern int bpf_jit_enable;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00002742
2743extern bool netdev_has_upper_dev(struct net_device *dev,
2744 struct net_device *upper_dev);
2745extern bool netdev_has_any_upper_dev(struct net_device *dev);
2746extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
2747extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
2748extern int netdev_upper_dev_link(struct net_device *dev,
2749 struct net_device *upper_dev);
2750extern int netdev_master_upper_dev_link(struct net_device *dev,
2751 struct net_device *upper_dev);
2752extern void netdev_upper_dev_unlink(struct net_device *dev,
2753 struct net_device *upper_dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08002754extern int skb_checksum_help(struct sk_buff *skb);
Cong Wang12b00042013-02-05 16:36:38 +00002755extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2756 netdev_features_t features, bool tx_path);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002757extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2758 netdev_features_t features);
Cong Wang12b00042013-02-05 16:36:38 +00002759
2760static inline
2761struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
2762{
2763 return __skb_gso_segment(skb, features, true);
2764}
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002765__be16 skb_network_protocol(struct sk_buff *skb);
2766
2767static inline bool can_checksum_protocol(netdev_features_t features,
2768 __be16 protocol)
2769{
2770 return ((features & NETIF_F_GEN_CSUM) ||
2771 ((features & NETIF_F_V4_CSUM) &&
2772 protocol == htons(ETH_P_IP)) ||
2773 ((features & NETIF_F_V6_CSUM) &&
2774 protocol == htons(ETH_P_IPV6)) ||
2775 ((features & NETIF_F_FCOE_CRC) &&
2776 protocol == htons(ETH_P_FCOE)));
2777}
Cong Wang12b00042013-02-05 16:36:38 +00002778
Herbert Xufb286bb2005-11-10 13:01:24 -08002779#ifdef CONFIG_BUG
2780extern void netdev_rx_csum_fault(struct net_device *dev);
2781#else
2782static inline void netdev_rx_csum_fault(struct net_device *dev)
2783{
2784}
2785#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786/* rx skb timestamps */
2787extern void net_enable_timestamp(void);
2788extern void net_disable_timestamp(void);
2789
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03002790#ifdef CONFIG_PROC_FS
Cong Wang900ff8c2013-02-18 19:20:33 +00002791extern int __init dev_proc_init(void);
2792#else
2793#define dev_proc_init() 0
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03002794#endif
2795
Jay Vosburghb8a97872008-06-13 18:12:04 -07002796extern int netdev_class_create_file(struct class_attribute *class_attr);
2797extern void netdev_class_remove_file(struct class_attribute *class_attr);
2798
Johannes Berg04600792010-08-05 17:45:15 +02002799extern struct kobj_ns_type_operations net_ns_type_operations;
2800
David S. Miller3019de12011-06-06 16:41:33 -07002801extern const char *netdev_drivername(const struct net_device *dev);
Arjan van de Ven6579e572008-07-21 13:31:48 -07002802
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03002803extern void linkwatch_run_queue(void);
2804
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002805static inline netdev_features_t netdev_get_wanted_features(
2806 struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00002807{
2808 return (dev->features & ~dev->hw_features) | dev->wanted_features;
2809}
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002810netdev_features_t netdev_increment_features(netdev_features_t all,
2811 netdev_features_t one, netdev_features_t mask);
Eric Dumazetb0ce3502013-05-16 07:34:53 +00002812
2813/* Allow TSO to be used on stacked devices:
2814 * performing the GSO segmentation before the last device
2815 * is a performance improvement.
2816 */
2817static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
2818 netdev_features_t mask)
2819{
2820 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
2821}
2822
Michał Mirosław6cb6a272011-04-02 22:48:47 -07002823int __netdev_update_features(struct net_device *dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00002824void netdev_update_features(struct net_device *dev);
Michał Mirosławafe12cc2011-05-07 03:22:17 +00002825void netdev_change_features(struct net_device *dev);
Herbert Xu7f353bf2007-08-10 15:47:58 -07002826
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08002827void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2828 struct net_device *dev);
2829
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002830netdev_features_t netif_skb_features(struct sk_buff *skb);
Jesse Gross58e998c2010-10-29 12:14:55 +00002831
David S. Miller4d295152012-03-07 21:02:35 -05002832static inline bool net_gso_ok(netdev_features_t features, int gso_type)
Herbert Xubcd76112006-06-30 13:36:35 -07002833{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002834 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
Michał Mirosław0345e182011-11-16 14:05:33 +00002835
2836 /* check flags correspondence */
2837 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
2838 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
2839 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
2840 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
2841 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
2842 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
2843
Herbert Xubcd76112006-06-30 13:36:35 -07002844 return (features & feature) == feature;
2845}
2846
David S. Miller4d295152012-03-07 21:02:35 -05002847static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
Herbert Xu576a30e2006-06-27 13:22:38 -07002848{
Herbert Xu278b2512009-06-03 21:20:51 -07002849 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
David S. Miller21dc3302010-08-23 00:13:46 -07002850 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
Herbert Xu576a30e2006-06-27 13:22:38 -07002851}
2852
David S. Miller4d295152012-03-07 21:02:35 -05002853static inline bool netif_needs_gso(struct sk_buff *skb,
2854 netdev_features_t features)
Herbert Xu79671682006-06-22 02:40:14 -07002855{
Jesse Grossfc741212011-01-09 06:23:32 +00002856 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
Yi Zoucdbee742012-03-16 23:08:11 +00002857 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
2858 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
Herbert Xu79671682006-06-22 02:40:14 -07002859}
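/* Sketch of the common software-GSO pattern in a transmit path (error
 * handling abridged):
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;		(assumed error label)
 *		...transmit each segment in the segs list...
 *	}
 */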
2860
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07002861static inline void netif_set_gso_max_size(struct net_device *dev,
2862 unsigned int size)
2863{
2864 dev->gso_max_size = size;
2865}
2866
nikolay@redhat.com8a7fbfa2013-03-12 02:49:01 +00002867static inline bool netif_is_bond_master(struct net_device *dev)
2868{
2869 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
2870}
2871
David S. Miller4d295152012-03-07 21:02:35 -05002872static inline bool netif_is_bond_slave(struct net_device *dev)
Jiri Pirko1765a572011-02-12 06:48:36 +00002873{
2874 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
2875}
2876
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002877static inline bool netif_supports_nofcs(struct net_device *dev)
2878{
2879 return dev->priv_flags & IFF_SUPP_NOFCS;
2880}
2881
Eric W. Biederman505d4f72008-11-07 22:54:20 -08002882extern struct pernet_operations __net_initdata loopback_net_ops;
Patrick McHardyb1b67dd2009-04-20 04:49:28 +00002883
Joe Perches571ba422010-02-09 11:49:47 +00002884/* Logging, debugging and troubleshooting/diagnostic helpers. */
2885
2886/* netdev_printk helpers, similar to dev_printk */
2887
2888static inline const char *netdev_name(const struct net_device *dev)
2889{
2890 if (dev->reg_state != NETREG_REGISTERED)
2891 return "(unregistered net_device)";
2892 return dev->name;
2893}
2894
Joe Perchesb9075fa2011-10-31 17:11:33 -07002895extern __printf(3, 4)
2896int netdev_printk(const char *level, const struct net_device *dev,
2897 const char *format, ...);
2898extern __printf(2, 3)
2899int netdev_emerg(const struct net_device *dev, const char *format, ...);
2900extern __printf(2, 3)
2901int netdev_alert(const struct net_device *dev, const char *format, ...);
2902extern __printf(2, 3)
2903int netdev_crit(const struct net_device *dev, const char *format, ...);
2904extern __printf(2, 3)
2905int netdev_err(const struct net_device *dev, const char *format, ...);
2906extern __printf(2, 3)
2907int netdev_warn(const struct net_device *dev, const char *format, ...);
2908extern __printf(2, 3)
2909int netdev_notice(const struct net_device *dev, const char *format, ...);
2910extern __printf(2, 3)
2911int netdev_info(const struct net_device *dev, const char *format, ...);
Joe Perches571ba422010-02-09 11:49:47 +00002912
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03002913#define MODULE_ALIAS_NETDEV(device) \
2914 MODULE_ALIAS("netdev-" device)
2915
Jim Cromieb558c962011-12-19 17:11:18 -05002916#if defined(CONFIG_DYNAMIC_DEBUG)
Joe Perches571ba422010-02-09 11:49:47 +00002917#define netdev_dbg(__dev, format, args...) \
2918do { \
Jason Baronffa10cb2011-08-11 14:36:48 -04002919 dynamic_netdev_dbg(__dev, format, ##args); \
Joe Perches571ba422010-02-09 11:49:47 +00002920} while (0)
Jim Cromieb558c962011-12-19 17:11:18 -05002921#elif defined(DEBUG)
2922#define netdev_dbg(__dev, format, args...) \
2923 netdev_printk(KERN_DEBUG, __dev, format, ##args)
Joe Perches571ba422010-02-09 11:49:47 +00002924#else
2925#define netdev_dbg(__dev, format, args...) \
2926({ \
2927 if (0) \
2928 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
2929 0; \
2930})
2931#endif
2932
2933#if defined(VERBOSE_DEBUG)
2934#define netdev_vdbg netdev_dbg
2935#else
2936
2937#define netdev_vdbg(dev, format, args...) \
2938({ \
2939 if (0) \
2940 netdev_printk(KERN_DEBUG, dev, format, ##args); \
2941 0; \
2942})
2943#endif
2944
2945/*
2946 * netdev_WARN() acts like dev_printk(), but with the key difference
2947 * of using a WARN/WARN_ON to get the message out, including the
2948 * file/line information and a backtrace.
2949 */
2950#define netdev_WARN(dev, format, args...) \
2951 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
2952
Joe Perchesb3d95c52010-02-09 11:49:49 +00002953/* netif printk helpers, similar to netdev_printk */
2954
2955#define netif_printk(priv, type, level, dev, fmt, args...) \
2956do { \
2957 if (netif_msg_##type(priv)) \
2958 netdev_printk(level, (dev), fmt, ##args); \
2959} while (0)
2960
Joe Perchesf45f4322010-06-27 01:02:36 +00002961#define netif_level(level, priv, type, dev, fmt, args...) \
2962do { \
2963 if (netif_msg_##type(priv)) \
2964 netdev_##level(dev, fmt, ##args); \
2965} while (0)
2966
Joe Perchesb3d95c52010-02-09 11:49:49 +00002967#define netif_emerg(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00002968 netif_level(emerg, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00002969#define netif_alert(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00002970 netif_level(alert, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00002971#define netif_crit(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00002972 netif_level(crit, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00002973#define netif_err(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00002974 netif_level(err, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00002975#define netif_warn(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00002976 netif_level(warn, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00002977#define netif_notice(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00002978 netif_level(notice, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00002979#define netif_info(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00002980 netif_level(info, priv, type, dev, fmt, ##args)
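/* Illustrative usage (driver-private "priv" must carry msg_enable): these
 * helpers gate output on both the log level and the driver's message bits:
 *
 *	netif_err(priv, tx_err, dev, "TX timeout on queue %u\n", qid);
 *	netif_info(priv, link, dev, "link up, %u Mbps\n", speed);
 */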
Joe Perchesb3d95c52010-02-09 11:49:49 +00002981
Joe Perches0053ea92012-05-30 07:43:34 +00002982#if defined(CONFIG_DYNAMIC_DEBUG)
Joe Perchesb3d95c52010-02-09 11:49:49 +00002983#define netif_dbg(priv, type, netdev, format, args...) \
2984do { \
2985 if (netif_msg_##type(priv)) \
Jason Baronb5fb0a02011-08-11 14:36:53 -04002986 dynamic_netdev_dbg(netdev, format, ##args); \
Joe Perchesb3d95c52010-02-09 11:49:49 +00002987} while (0)
Joe Perches0053ea92012-05-30 07:43:34 +00002988#elif defined(DEBUG)
2989#define netif_dbg(priv, type, dev, format, args...) \
2990 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00002991#else
2992#define netif_dbg(priv, type, dev, format, args...) \
2993({ \
2994 if (0) \
2995 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2996 0; \
2997})
2998#endif
2999
3000#if defined(VERBOSE_DEBUG)
Ben Hutchingsbcfcc452010-07-02 07:08:44 +00003001#define netif_vdbg netif_dbg
Joe Perchesb3d95c52010-02-09 11:49:49 +00003002#else
3003#define netif_vdbg(priv, type, dev, format, args...) \
3004({ \
3005 if (0) \
Ben Hutchingsa4ed89c2010-05-18 06:56:32 +00003006 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
Joe Perchesb3d95c52010-02-09 11:49:49 +00003007 0; \
3008})
3009#endif
Joe Perches571ba422010-02-09 11:49:47 +00003010
Cong Wang900ff8c2013-02-18 19:20:33 +00003011/*
3012 * The list of packet types we will receive (as opposed to discard)
3013 * and the routines to invoke.
3014 *
3015 * Why 16? Because with 16 the only overlap we get on a hash of the
3016 * low nibble of the protocol value is RARP/SNAP/X.25.
3017 *
3018 * NOTE: That is no longer true with the addition of VLAN tags. Not
3019 * sure which should go first, but I bet it won't make much
3020 * difference if we are running VLANs. The good news is that
3021 * this protocol won't be in the list unless compiled in, so
3022 * the average user (w/out VLANs) will not be adversely affected.
3023 * --BLG
3024 *
3025 * 0800 IP
3026 * 8100 802.1Q VLAN
3027 * 0001 802.3
3028 * 0002 AX.25
3029 * 0004 802.2
3030 * 8035 RARP
3031 * 0005 SNAP
3032 * 0805 X.25
3033 * 0806 ARP
3034 * 8137 IPX
3035 * 0009 Localtalk
3036 * 86DD IPv6
3037 */
3038#define PTYPE_HASH_SIZE (16)
3039#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
3040
Jiri Pirko385a1542009-05-27 15:48:07 -07003041#endif /* _LINUX_NETDEVICE_H */