/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <net/xdp.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>

struct netpoll_info;
struct device;
struct phy_device;
struct dsa_port;

struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct bpf_prog;
struct xdp_buff;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
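
/*
 * Example (illustrative sketch, not part of this header): consuming the mixed
 * return-code namespaces above. A caller of the driver xmit hook can use
 * dev_xmit_complete() to decide whether the skb is still its responsibility;
 * the requeue helper named below is hypothetical.
 *
 *	rc = netdev_start_xmit(skb, dev, txq, false);
 *	if (!dev_xmit_complete(rc))
 *		hypothetical_requeue(skb);	// NETDEV_TX_BUSY: skb not consumed
 *	// any rc below NET_XMIT_MASK (including negative errnos) means the
 *	// driver took ownership and the skb must not be touched again
 */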

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
extern struct static_key rfs_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
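
/*
 * Worked example (informational, not from the original source): for a plain
 * Ethernet device with hard_header_len = 14 (ETH_HLEN) and needed_headroom = 0,
 * LL_RESERVED_SPACE(dev) = ((14 & ~15) + 16) = 16, i.e. the 14-byte header
 * rounded up with at least HH_DATA_MOD bytes reserved. With an extra 10 bytes
 * of needed_headroom it becomes ((24 & ~15) + 16) = 32.
 */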

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * The number of GRO hash buckets; must be less than the number of bits in
 * napi_struct::gro_bitmask.
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_MISSED,	/* reschedule a NAPI */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
};

enum {
	NAPIF_STATE_SCHED	 = BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED	 = BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE	 = BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC	 = BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_HASHED	 = BIT(NAPI_STATE_HASHED),
	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
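
/*
 * Example (illustrative sketch, not part of this header): a minimal
 * rx_handler that lets everything through. The foo_* names are hypothetical;
 * real users such as bridge and bonding register theirs via
 * netdev_rx_handler_register().
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (hypothetical_should_steal(skb)) {
 *			kfree_skb(skb);
 *			return RX_HANDLER_CONSUMED;
 *		}
 *		return RX_HANDLER_PASS;	// normal delivery continues
 *	}
 *
 *	err = netdev_rx_handler_register(dev, foo_handle_frame, NULL);
 */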

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}
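
/*
 * Example (illustrative sketch, not part of this header): the canonical
 * hand-off from a device interrupt to NAPI. All foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_isr(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_mask_device_irqs(priv);	// quiesce the NIC
 *		napi_schedule(&priv->napi);	// foo_poll() will run soon
 *		return IRQ_HANDLED;
 *	}
 */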

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

bool napi_complete_done(struct napi_struct *n, int work_done);
/**
 *	napi_complete - NAPI processing complete
 *	@n: NAPI context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 * Return false if device should avoid rearming interrupts.
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
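
/*
 * Example (illustrative sketch, not part of this header): a typical NAPI poll
 * routine pairing budget accounting with napi_complete_done(). The foo_*
 * helpers are hypothetical stand-ins for ring processing and irq unmasking.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_clean_rx_ring(priv, budget);
 *
 *		if (work_done < budget &&
 *		    napi_complete_done(napi, work_done))
 *			foo_unmask_device_irqs(priv);	// safe to rearm irqs
 *		return work_done;
 *	}
 */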
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700482
483/**
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300484 * napi_hash_del - remove a NAPI from global table
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500485 * @napi: NAPI context
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300486 *
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500487 * Warning: caller must observe RCU grace period
Eric Dumazet34cbe272015-11-18 06:31:02 -0800488 * before freeing memory containing @napi, if
489 * this function returns true.
Eric Dumazet93d05d42015-11-18 06:31:03 -0800490 * Note: core networking stack automatically calls it
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500491 * from netif_napi_del().
Eric Dumazet93d05d42015-11-18 06:31:03 -0800492 * Drivers might want to call this helper to combine all
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500493 * the needed RCU grace periods into a single one.
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300494 */
Eric Dumazet34cbe272015-11-18 06:31:02 -0800495bool napi_hash_del(struct napi_struct *napi);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300496
497/**
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700498 * napi_disable - prevent NAPI from scheduling
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500499 * @n: NAPI context
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700500 *
501 * Stop NAPI from being scheduled on this context.
502 * Waits till any outstanding processing completes.
503 */
Eric Dumazet3b47d302014-11-06 21:09:44 -0800504void napi_disable(struct napi_struct *n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700505
506/**
507 * napi_enable - enable NAPI scheduling
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500508 * @n: NAPI context
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700509 *
510 * Resume NAPI from being scheduled on this context.
511 * Must be paired with napi_disable.
512 */
513static inline void napi_enable(struct napi_struct *n)
514{
515 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
Peter Zijlstra4e857c52014-03-17 18:06:10 +0100516 smp_mb__before_atomic();
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700517 clear_bit(NAPI_STATE_SCHED, &n->state);
Neil Horman2d8bff1262015-09-23 14:57:58 -0400518 clear_bit(NAPI_STATE_NPSVC, &n->state);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700519}
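
/*
 * Example (illustrative sketch, not part of this header): pairing
 * napi_enable()/napi_disable() in a driver's open/stop paths. The foo_*
 * names are hypothetical; netif_napi_add() is typically done at probe time.
 *
 *	static int foo_open(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		napi_enable(&priv->napi);	// before enabling device irqs
 *		return 0;
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		napi_disable(&priv->napi);	// waits for in-flight polls
 *		return 0;
 *	}
 */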

/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

/**
 *	napi_if_scheduled_mark_missed - if napi is running, set the
 *	NAPIF_STATE_MISSED
 *	@n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 **/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits is set in the state). Drivers should not need to call
 * the netif_xmit_*stopped functions; they should only be using netif_tx_*.
 */
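
/*
 * Example (illustrative sketch, not part of this header): typical driver-side
 * flow control using the netif_tx_* helpers. The ring-accounting helpers are
 * hypothetical.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 *
 *		foo_post_to_hw_ring(dev, skb);
 *		if (foo_hw_ring_full(dev))
 *			netif_tx_stop_queue(txq);	// sets __QUEUE_STATE_DRV_XOFF
 *		return NETDEV_TX_OK;
 *	}
 *
 *	// ...and in the Tx-completion path, once descriptors are reclaimed:
 *	netif_tx_wake_queue(txq);			// clears __QUEUE_STATE_DRV_XOFF
 */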

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;

static inline bool net_has_fallback_tunnels(const struct net *net)
{
	return net == &init_net ||
	       !IS_ENABLED(CONFIG_SYSCTL) ||
	       !sysctl_fb_tunnels_only_for_init_net;
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;
	struct rcu_head rcu;
	u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;
	u16 filter;
	unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;
	struct rcu_head rcu;
	struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

689/*
690 * The rps_sock_flow_table contains mappings of flows to the last CPU
691 * on which they were processed by the application (set in recvmsg).
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500692 * Each entry is a 32bit value. Upper part is the high-order bits
693 * of flow hash, lower part is CPU number.
Eric Dumazet567e4b72015-02-06 12:59:01 -0800694 * rps_cpu_mask is used to partition the space, depending on number of
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500695 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
696 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
Eric Dumazet567e4b72015-02-06 12:59:01 -0800697 * meaning we use 32-6=26 bits for the hash.
Tom Herbertfec5e652010-04-16 16:01:27 -0700698 */
699struct rps_sock_flow_table {
Eric Dumazet567e4b72015-02-06 12:59:01 -0800700 u32 mask;
Eric Dumazet93c1af62015-02-08 20:39:13 -0800701
702 u32 ents[0] ____cacheline_aligned_in_smp;
Tom Herbertfec5e652010-04-16 16:01:27 -0700703};
Eric Dumazet567e4b72015-02-06 12:59:01 -0800704#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
Tom Herbertfec5e652010-04-16 16:01:27 -0700705
706#define RPS_NO_CPU 0xffff
707
Eric Dumazet567e4b72015-02-06 12:59:01 -0800708extern u32 rps_cpu_mask;
709extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
710
Tom Herbertfec5e652010-04-16 16:01:27 -0700711static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
712 u32 hash)
713{
714 if (table && hash) {
Eric Dumazet567e4b72015-02-06 12:59:01 -0800715 unsigned int index = hash & table->mask;
716 u32 val = hash & ~rps_cpu_mask;
Tom Herbertfec5e652010-04-16 16:01:27 -0700717
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500718 /* We only give a hint, preemption can change CPU under us */
Eric Dumazet567e4b72015-02-06 12:59:01 -0800719 val |= raw_smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -0700720
Eric Dumazet567e4b72015-02-06 12:59:01 -0800721 if (table->ents[index] != val)
722 table->ents[index] = val;
Tom Herbertfec5e652010-04-16 16:01:27 -0700723 }
724}
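
/*
 * Worked example (informational, not from the original source): with 64
 * possible CPUs, rps_cpu_mask = 0x3f. For hash = 0xabcd1234 recorded on
 * CPU 5, the stored entry is (0xabcd1234 & ~0x3f) | 5 = 0xabcd1205, and it
 * lands at index (0xabcd1234 & table->mask). A later lookup compares the
 * upper 26 hash bits to detect collisions and reads the CPU from the low 6.
 */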
725
Ben Hutchingsc4454772011-01-19 11:03:53 +0000726#ifdef CONFIG_RFS_ACCEL
Joe Perchesf629d202013-09-26 14:48:15 -0700727bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
728 u16 filter_id);
Ben Hutchingsc4454772011-01-19 11:03:53 +0000729#endif
Michael Daltona953be52014-01-16 22:23:28 -0800730#endif /* CONFIG_RPS */
Ben Hutchingsc4454772011-01-19 11:03:53 +0000731
Tom Herbert0a9627f2010-03-16 08:03:29 +0000732/* This structure contains an instance of an RX queue. */
733struct netdev_rx_queue {
Michael Daltona953be52014-01-16 22:23:28 -0800734#ifdef CONFIG_RPS
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000735 struct rps_map __rcu *rps_map;
736 struct rps_dev_flow_table __rcu *rps_flow_table;
Michael Daltona953be52014-01-16 22:23:28 -0800737#endif
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000738 struct kobject kobj;
Tom Herbertfe822242010-11-09 10:47:38 +0000739 struct net_device *dev;
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +0100740 struct xdp_rxq_info xdp_rxq;
Tom Herbert0a9627f2010-03-16 08:03:29 +0000741} ____cacheline_aligned_in_smp;
Michael Daltona953be52014-01-16 22:23:28 -0800742
743/*
744 * RX queue sysfs structures and functions.
745 */
746struct rx_queue_attribute {
747 struct attribute attr;
stephen hemminger718ad682017-08-18 13:46:24 -0700748 ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
Michael Daltona953be52014-01-16 22:23:28 -0800749 ssize_t (*store)(struct netdev_rx_queue *queue,
stephen hemminger718ad682017-08-18 13:46:24 -0700750 const char *buf, size_t len);
Michael Daltona953be52014-01-16 22:23:28 -0800751};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;
	unsigned int alloc_len;
	struct rcu_head rcu;
	u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;
	struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */
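
/*
 * Worked example (informational, not from the original source): on a 64-bit
 * build with nr_cpu_ids = 8 and a single traffic class,
 * XPS_CPU_DEV_MAPS_SIZE(1) = sizeof(struct xps_dev_maps) + 8 * 1 * 8 bytes,
 * i.e. one xps_map pointer slot per (CPU, TC) pair after the rcu_head.
 */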

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum tc_setup_type {
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	XDP_QUERY_PROG,
	XDP_QUERY_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_VERIFIER_PREP,
	BPF_OFFLOAD_TRANSLATE,
	BPF_OFFLOAD_DESTROY,
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_QUERY_XSK_UMEM,
	XDP_SETUP_XSK_UMEM,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
		struct {
			u32 prog_id;
			/* flags with which program was installed */
			u32 prog_flags;
		};
		/* BPF_OFFLOAD_VERIFIER_PREP */
		struct {
			struct bpf_prog *prog;
			const struct bpf_prog_offload_ops *ops; /* callee set */
		} verifier;
		/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
		struct {
			struct bpf_prog *prog;
		} offload;
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
		struct {
			struct xdp_umem *umem; /* out for query */
			u16 queue_id; /* in for query */
		} xsk;
	};
};
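
/*
 * Example (illustrative sketch, not part of this header): the shape of a
 * driver's ndo_bpf() dispatcher over struct netdev_bpf. The foo_* helpers
 * and priv layout are hypothetical.
 *
 *	static int foo_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return foo_xdp_setup(priv, bpf->prog, bpf->extack);
 *		case XDP_QUERY_PROG:
 *			bpf->prog_id = priv->xdp_prog ?
 *				       priv->xdp_prog->aux->id : 0;
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */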

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add) (struct xfrm_state *x);
	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
	void	(*xdo_dev_state_free) (struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
				       struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
};
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tls_crypto_info;
struct tls_context;

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	void (*tls_dev_resync_rx)(struct net_device *netdev,
				  struct sock *sk, u32 seq, u64 rcd_sn);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};
946
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800947/*
948 * This structure defines the management hooks for network devices.
Stephen Hemminger00829822008-11-20 20:14:53 -0800949 * The following hooks can be defined; unless noted otherwise, they are
950 * optional and can be filled with a null pointer.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800951 *
952 * int (*ndo_init)(struct net_device *dev);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500953 * This function is called once when a network device is registered.
954 * The network device can use this for any late stage initialization
955 * or semantic validation. It can fail with an error code which will
956 * be propagated back to register_netdev.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800957 *
958 * void (*ndo_uninit)(struct net_device *dev);
959 * This function is called when device is unregistered or when registration
960 * fails. It is not called if init fails.
961 *
962 * int (*ndo_open)(struct net_device *dev);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500963 * This function is called when a network device transitions to the up
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800964 * state.
965 *
966 * int (*ndo_stop)(struct net_device *dev);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500967 * This function is called when a network device transitions to the down
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800968 * state.
969 *
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +0000970 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
971 * struct net_device *dev);
Stephen Hemminger00829822008-11-20 20:14:53 -0800972 * Called when a packet needs to be transmitted.
Rusty Russelle79d8422015-04-03 22:17:17 +1030973 * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
974 * the queue before that can happen; it's for obsolete devices and weird
975 * corner cases, but the stack really does a non-trivial amount
976 * of useless work if you return NETDEV_TX_BUSY.
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500977 * Required; cannot be NULL.
Stephen Hemminger00829822008-11-20 20:14:53 -0800978 *
Dimitris Michailidis1a2a1442017-01-31 16:03:13 -0800979 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
980 * struct net_device *dev
981 * netdev_features_t features);
982 * Called by core transmit path to determine if device is capable of
983 * performing offload operations on a given packet. This is to give
984 * the device an opportunity to implement any restrictions that cannot
985 * be otherwise expressed by feature flags. The check is called with
986 * the set of features that the stack has calculated and it returns
987 * those the driver believes to be appropriate.
Eric Dumazetcdba7562016-01-06 06:53:50 -0800988 *
Jason Wangf663dd92014-01-10 16:18:26 +0800989 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
Alexander Duyck4f49dec2018-07-09 12:19:59 -0400990 * struct net_device *sb_dev,
991 * select_queue_fallback_t fallback);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500992 * Called to decide which queue to use when device supports multiple
Stephen Hemminger00829822008-11-20 20:14:53 -0800993 * transmit queues.
994 *
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800995 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
996 * This function is called to allow device receiver to make
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500997 * changes to configuration when multicast or promiscuous is enabled.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800998 *
999 * void (*ndo_set_rx_mode)(struct net_device *dev);
1000 * This function is called device changes address list filtering.
Jiri Pirko01789342011-08-16 06:29:00 +00001001 * If driver handles unicast address filtering, it should set
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001002 * IFF_UNICAST_FLT in its priv_flags.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001003 *
1004 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
1005 * This function is called when the Media Access Control address
Mike Rapoport37b607c2009-04-27 05:45:54 -07001006 * needs to be changed. If this interface is not defined, the
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001007 * MAC address can not be changed.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001008 *
1009 * int (*ndo_validate_addr)(struct net_device *dev);
1010 * Test if Media Access Control address is valid for the device.
1011 *
1012 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001013 * Called when a user requests an ioctl which can't be handled by
1014 * the generic interface code. If not defined ioctls return
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001015 * not supported error code.
1016 *
1017 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
1018 * Used to set network devices bus interface parameters. This interface
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001019 * is retained for legacy reasons; new devices should use the bus
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001020 * interface (PCI) for low level management.
1021 *
1022 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
1023 * Called when a user wants to change the Maximum Transfer Unit
Magnus Dammdb46a0e2017-06-14 16:15:24 +09001024 * of a device.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001025 *
Stephen Hemminger00829822008-11-20 20:14:53 -08001026 * void (*ndo_tx_timeout)(struct net_device *dev);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001027 * Callback used when the transmitter has not made any progress
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001028 * for dev->watchdog ticks.
1029 *
stephen hemmingerbc1f4472017-01-06 19:12:52 -08001030 * void (*ndo_get_stats64)(struct net_device *dev,
1031 * struct rtnl_link_stats64 *storage);
Wolfram Sangd308e382009-10-07 13:53:11 -07001032 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001033 * Called when a user wants to get the network device usage
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +00001034 * statistics. Drivers must do one of the following:
Ben Hutchings3cfde792010-07-09 09:11:52 +00001035 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
1036 * rtnl_link_stats64 structure passed by the caller.
Ben Hutchings82695d92010-06-15 15:08:48 -07001037 * 2. Define @ndo_get_stats to update a net_device_stats structure
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +00001038 * (which should normally be dev->stats) and return a pointer to
1039 * it. The structure may be changed asynchronously only if each
1040 * field is written atomically.
1041 * 3. Update dev->stats asynchronously and atomically, and define
1042 * neither operation.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001043 *
Or Gerlitz3df5b3c2016-11-22 23:09:54 +02001044 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
Nogah Frankel2c9d85d2016-09-16 15:05:36 +02001045 * Return true if this device supports offload stats of this attr_id.
1046 *
1047 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
1048 * void *attr_data)
1049 * Get statistics for offload operations by attr_id. Write it into the
1050 * attr_data pointer.
1051 *
B Viswanath5d632cb2015-01-12 14:46:25 +05301052 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001053 * If device supports VLAN filtering this function is called when a
Patrick McHardy80d5c362013-04-19 02:04:28 +00001054 * VLAN id is registered.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001055 *
B Viswanath5d632cb2015-01-12 14:46:25 +05301056 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001057 * If device supports VLAN filtering this function is called when a
Patrick McHardy80d5c362013-04-19 02:04:28 +00001058 * VLAN id is unregistered.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001059 *
1060 * void (*ndo_poll_controller)(struct net_device *dev);
Williams, Mitch A95c26df2010-02-10 01:43:46 +00001061 *
1062 * SR-IOV management functions.
1063 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
Moshe Shemesh79aab092016-09-22 12:11:15 +03001064 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
1065 * u8 qos, __be16 proto);
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001066 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
1067 * int max_tx_rate);
Greg Rose5f8444a2011-10-08 03:05:24 +00001068 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
Hiroshi Shimamotodd461d62015-08-28 06:57:55 +00001069 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
Williams, Mitch A95c26df2010-02-10 01:43:46 +00001070 * int (*ndo_get_vf_config)(struct net_device *dev,
1071 * int vf, struct ifla_vf_info *ivf);
Rony Efraim1d8faf42013-06-13 13:19:10 +03001072 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
Scott Feldman57b61082010-05-17 22:49:55 -07001073 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
1074 * struct nlattr *port[]);
Vlad Zolotarov01a3d792015-03-30 21:35:23 +03001075 *
1076 * Enable or disable the VF ability to query its RSS Redirection Table and
 1077 * Hash Key. This is needed since on some devices VFs share this information
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001078 * with the PF, and querying it may introduce a theoretical security risk.
Vlad Zolotarov01a3d792015-03-30 21:35:23 +03001079 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
Scott Feldman57b61082010-05-17 22:49:55 -07001080 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
Jiri Pirko2572ac52017-08-07 10:15:17 +02001081 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02001082 * void *type_data);
Florian Fainelli6a4bc2b2017-01-26 14:44:17 -08001083 * Called to setup any 'tc' scheduler, classifier or action on @dev.
1084 * This is always called from the stack with the rtnl lock held and netif
1085 * tx queues stopped. This allows the netdevice to perform queue
1086 * management safely.
Ben Hutchingsc4454772011-01-19 11:03:53 +00001087 *
Yi Zoue9bce842011-03-09 08:48:03 +00001088 * Fibre Channel over Ethernet (FCoE) offload functions.
1089 * int (*ndo_fcoe_enable)(struct net_device *dev);
1090 * Called when the FCoE protocol stack wants to start using LLD for FCoE
1091 * so the underlying device can perform whatever needed configuration or
1092 * initialization to support acceleration of FCoE traffic.
1093 *
1094 * int (*ndo_fcoe_disable)(struct net_device *dev);
1095 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
1096 * so the underlying device can perform whatever needed clean-ups to
1097 * stop supporting acceleration of FCoE traffic.
1098 *
1099 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
1100 * struct scatterlist *sgl, unsigned int sgc);
1101 * Called when the FCoE Initiator wants to initialize an I/O that
1102 * is a possible candidate for Direct Data Placement (DDP). The LLD can
 1103 * perform the necessary setup and return 1 to indicate that the device is
 1104 * set up successfully to perform DDP on this I/O; otherwise it returns 0.
1105 *
1106 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
1107 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
1108 * indicated by the FC exchange id 'xid', so the underlying device can
1109 * clean up and reuse resources for later DDP requests.
1110 *
1111 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
1112 * struct scatterlist *sgl, unsigned int sgc);
1113 * Called when the FCoE Target wants to initialize an I/O that
1114 * is a possible candidate for Direct Data Placement (DDP). The LLD can
 1115 * perform the necessary setup and return 1 to indicate that the device is
 1116 * set up successfully to perform DDP on this I/O; otherwise it returns 0.
1117 *
Neerav Parikh68bad942012-01-04 20:23:39 +00001118 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1119 * struct netdev_fcoe_hbainfo *hbainfo);
 1120 * Called when the FCoE protocol stack wants information on the underlying
1121 * device. This information is utilized by the FCoE protocol stack to
 1122 * register attributes with the Fibre Channel management service as per the
 1123 * FC-GS Fabric Device Management Information (FDMI) specification.
1124 *
Yi Zoue9bce842011-03-09 08:48:03 +00001125 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 1126 * Called when the underlying device wants to override the default World Wide
 1127 * Name (WWN) generation mechanism in the FCoE protocol stack to pass its own
1128 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1129 * protocol stack to use.
1130 *
Ben Hutchingsc4454772011-01-19 11:03:53 +00001131 * RFS acceleration.
1132 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1133 * u16 rxq_index, u32 flow_id);
1134 * Set hardware filter for RFS. rxq_index is the target queue index;
1135 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
1136 * Return the filter ID on success, or a negative error code.
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001137 *
Jiri Pirko8b98a702013-01-03 22:49:02 +00001138 * Slave management functions (for bridge, bonding, etc).
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001139 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
1140 * Called to make another netdev an underling.
1141 *
1142 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
1143 * Called to release previously enslaved netdev.
Michał Mirosław5455c692011-02-15 16:59:17 +00001144 *
1145 * Feature/offload setting functions.
Dimitris Michailidis1a2a1442017-01-31 16:03:13 -08001146 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1147 * netdev_features_t features);
1148 * Adjusts the requested feature flags according to device-specific
1149 * constraints, and returns the resulting flags. Must not modify
1150 * the device state.
1151 *
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001152 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
Michał Mirosław5455c692011-02-15 16:59:17 +00001153 * Called to update device configuration to new features. Passed
 1154 * feature set might be less than what was returned by ndo_fix_features().
1155 * Must return >0 or -errno if it changed dev->features itself.
1156 *
stephen hemmingeredc7d572012-10-01 12:32:33 +00001157 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
1158 * struct net_device *dev,
Jiri Pirkof6f64242014-11-28 14:34:15 +01001159 * const unsigned char *addr, u16 vid, u16 flags)
John Fastabend77162022012-04-15 06:43:56 +00001160 * Adds an FDB entry to dev for addr.
Vlad Yasevich1690be62013-02-13 12:00:18 +00001161 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
1162 * struct net_device *dev,
Jiri Pirkof6f64242014-11-28 14:34:15 +01001163 * const unsigned char *addr, u16 vid)
John Fastabend77162022012-04-15 06:43:56 +00001164 * Deletes the FDB entry from dev corresponding to addr.
1165 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
Jamal Hadi Salim5d5eacb2014-07-10 07:01:58 -04001166 * struct net_device *dev, struct net_device *filter_dev,
Roopa Prabhud2976532016-08-30 21:56:45 -07001167 * int *idx)
John Fastabend77162022012-04-15 06:43:56 +00001168 * Used to add FDB entries to dump requests. Implementers should add
1169 * entries to skb and update idx with the number of entries.
John Fastabende5a55a82012-10-24 08:12:57 +00001170 *
Nicolas Dichtelad41faa2015-03-17 11:16:00 +01001171 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
1172 * u16 flags)
John Fastabende5a55a82012-10-24 08:12:57 +00001173 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02001174 * struct net_device *dev, u32 filter_mask,
1175 * int nlflags)
Nicolas Dichtelad41faa2015-03-17 11:16:00 +01001176 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
1177 * u16 flags);
Jiri Pirko4bf84c32012-12-27 23:49:37 +00001178 *
1179 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
1180 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1181 * which do not represent real hardware may define this to allow their
1182 * userspace components to manage their virtual carrier state. Devices
 1183 * that determine carrier state from physical hardware properties (e.g.
 1184 * network cables) or protocol-dependent mechanisms (e.g.
1185 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
Jiri Pirko66b52b02013-07-29 18:16:49 +02001186 *
1187 * int (*ndo_get_phys_port_id)(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01001188 * struct netdev_phys_item_id *ppid);
Jiri Pirko66b52b02013-07-29 18:16:49 +02001189 * Called to get the ID of the physical port of this device. If the
 1190 * driver does not implement this, it is assumed that the hardware is not
 1191 * able to expose multiple net devices on a single physical port.
Joseph Gasparakis53cf52752013-09-04 02:13:38 -07001192 *
Alexander Duyck7c46a642016-06-16 12:21:00 -07001193 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
1194 * struct udp_tunnel_info *ti);
1195 * Called by UDP tunnel to notify a driver about the UDP port and socket
 1196 * address family that a UDP tunnel is listening to. It is called only
1197 * when a new port starts listening. The operation is protected by the
1198 * RTNL.
1199 *
1200 * void (*ndo_udp_tunnel_del)(struct net_device *dev,
1201 * struct udp_tunnel_info *ti);
1202 * Called by UDP tunnel to notify the driver about a UDP port and socket
1203 * address family that the UDP tunnel is not listening to anymore. The
1204 * operation is protected by the RTNL.
1205 *
John Fastabenda6cc0cf2013-11-06 09:54:46 -08001206 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1207 * struct net_device *dev)
1208 * Called by upper layer devices to accelerate switching or other
 1209 * station functionality into hardware. 'pdev' is the lowerdev
1210 * to use for the offload and 'dev' is the net device that will
1211 * back the offload. Returns a pointer to the private structure
1212 * the upper layer will maintain.
1213 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
1214 * Called by upper layer device to delete the station created
1215 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
1216 * the station and priv is the structure returned by the add
1217 * operation.
John Fastabend822b3b22015-03-18 14:57:33 +02001218 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1219 * int queue_index, u32 maxrate);
 1220 * Called when a user wants to set a max-rate limitation on a specific
1221 * TX queue.
Nicolas Dichtela54acb32015-04-02 17:07:00 +02001222 * int (*ndo_get_iflink)(const struct net_device *dev);
1223 * Called to get the iflink value of this device.
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07001224 * int (*ndo_change_proto_down)(struct net_device *dev,
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001225 * bool proto_down);
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07001226 * This function is used to pass protocol port error state information
1227 * to the switch driver. The switch driver can react to the proto_down
1228 * by doing a phys down on the associated switch port.
Pravin B Shelarfc4099f2015-10-22 18:17:16 -07001229 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 1230 * This function is used to get egress tunnel information for a given skb.
 1231 * This is useful for retrieving outer tunnel header parameters while
 1232 * sampling a packet.
Paolo Abeni871b6422016-02-26 10:45:37 +01001233 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1234 * This function is used to specify the headroom that the skb must
 1235 * consider when allocating an skb during packet reception. Setting an
 1236 * appropriate rx headroom value allows avoiding the skb head copy on
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001237 * forward. Setting a negative value resets the rx headroom to the
Paolo Abeni871b6422016-02-26 10:45:37 +01001238 * default value.
Jakub Kicinskif4e63522017-11-03 13:56:16 -07001239 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
Brenden Blancoa7862b42016-07-19 12:16:48 -07001240 * This function is used to set or query state related to XDP on the
Jakub Kicinskif4e63522017-11-03 13:56:16 -07001241 * netdevice and manage BPF offload. See definition of
1242 * enum bpf_netdev_command for details.
Jesper Dangaard Brouer42b33462018-05-31 10:59:47 +02001243 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
1244 * u32 flags);
Jesper Dangaard Brouer735fc402018-05-24 16:46:12 +02001245 * This function is used to submit @n XDP packets for transmit on a
 1246 * netdevice. Returns the number of frames successfully transmitted; frames
 1247 * that got dropped are freed/returned via xdp_return_frame().
 1248 * A negative return value means a general error invoking the ndo: no
 1249 * frames were transmitted and the caller will free all frames.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001250 */
1251struct net_device_ops {
1252 int (*ndo_init)(struct net_device *dev);
1253 void (*ndo_uninit)(struct net_device *dev);
1254 int (*ndo_open)(struct net_device *dev);
1255 int (*ndo_stop)(struct net_device *dev);
Eric Dumazetcdba7562016-01-06 06:53:50 -08001256 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1257 struct net_device *dev);
1258 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1259 struct net_device *dev,
1260 netdev_features_t features);
Stephen Hemminger00829822008-11-20 20:14:53 -08001261 u16 (*ndo_select_queue)(struct net_device *dev,
Jason Wangf663dd92014-01-10 16:18:26 +08001262 struct sk_buff *skb,
Alexander Duyck4f49dec2018-07-09 12:19:59 -04001263 struct net_device *sb_dev,
Daniel Borkmann99932d42014-02-16 15:55:20 +01001264 select_queue_fallback_t fallback);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001265 void (*ndo_change_rx_flags)(struct net_device *dev,
1266 int flags);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001267 void (*ndo_set_rx_mode)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001268 int (*ndo_set_mac_address)(struct net_device *dev,
1269 void *addr);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001270 int (*ndo_validate_addr)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001271 int (*ndo_do_ioctl)(struct net_device *dev,
1272 struct ifreq *ifr, int cmd);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001273 int (*ndo_set_config)(struct net_device *dev,
1274 struct ifmap *map);
Stephen Hemminger00829822008-11-20 20:14:53 -08001275 int (*ndo_change_mtu)(struct net_device *dev,
1276 int new_mtu);
1277 int (*ndo_neigh_setup)(struct net_device *dev,
1278 struct neigh_parms *);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001279 void (*ndo_tx_timeout) (struct net_device *dev);
1280
stephen hemmingerbc1f4472017-01-06 19:12:52 -08001281 void (*ndo_get_stats64)(struct net_device *dev,
1282 struct rtnl_link_stats64 *storage);
Or Gerlitz3df5b3c2016-11-22 23:09:54 +02001283 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
Nogah Frankel2c9d85d2016-09-16 15:05:36 +02001284 int (*ndo_get_offload_stats)(int attr_id,
1285 const struct net_device *dev,
1286 void *attr_data);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001287 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1288
Jiri Pirko8e586132011-12-08 19:52:37 -05001289 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
Patrick McHardy80d5c362013-04-19 02:04:28 +00001290 __be16 proto, u16 vid);
Jiri Pirko8e586132011-12-08 19:52:37 -05001291 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
Patrick McHardy80d5c362013-04-19 02:04:28 +00001292 __be16 proto, u16 vid);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001293#ifdef CONFIG_NET_POLL_CONTROLLER
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001294 void (*ndo_poll_controller)(struct net_device *dev);
Herbert Xu4247e162010-06-10 16:12:47 +00001295 int (*ndo_netpoll_setup)(struct net_device *dev,
Eric W. Biedermana8779ec2014-03-27 15:36:38 -07001296 struct netpoll_info *info);
WANG Cong0e34e932010-05-06 00:47:21 -07001297 void (*ndo_netpoll_cleanup)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001298#endif
Williams, Mitch A95c26df2010-02-10 01:43:46 +00001299 int (*ndo_set_vf_mac)(struct net_device *dev,
1300 int queue, u8 *mac);
1301 int (*ndo_set_vf_vlan)(struct net_device *dev,
Moshe Shemesh79aab092016-09-22 12:11:15 +03001302 int queue, u16 vlan,
1303 u8 qos, __be16 proto);
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001304 int (*ndo_set_vf_rate)(struct net_device *dev,
1305 int vf, int min_tx_rate,
1306 int max_tx_rate);
Greg Rose5f8444a2011-10-08 03:05:24 +00001307 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1308 int vf, bool setting);
Hiroshi Shimamotodd461d62015-08-28 06:57:55 +00001309 int (*ndo_set_vf_trust)(struct net_device *dev,
1310 int vf, bool setting);
Williams, Mitch A95c26df2010-02-10 01:43:46 +00001311 int (*ndo_get_vf_config)(struct net_device *dev,
1312 int vf,
1313 struct ifla_vf_info *ivf);
Rony Efraim1d8faf42013-06-13 13:19:10 +03001314 int (*ndo_set_vf_link_state)(struct net_device *dev,
1315 int vf, int link_state);
Eran Ben Elisha3b766cd2015-06-15 17:59:07 +03001316 int (*ndo_get_vf_stats)(struct net_device *dev,
1317 int vf,
1318 struct ifla_vf_stats
1319 *vf_stats);
Scott Feldman57b61082010-05-17 22:49:55 -07001320 int (*ndo_set_vf_port)(struct net_device *dev,
1321 int vf,
1322 struct nlattr *port[]);
1323 int (*ndo_get_vf_port)(struct net_device *dev,
1324 int vf, struct sk_buff *skb);
Eli Cohencc8e27c2016-03-11 22:58:34 +02001325 int (*ndo_set_vf_guid)(struct net_device *dev,
1326 int vf, u64 guid,
1327 int guid_type);
Vlad Zolotarov01a3d792015-03-30 21:35:23 +03001328 int (*ndo_set_vf_rss_query_en)(
1329 struct net_device *dev,
1330 int vf, bool setting);
John Fastabend16e5cc62016-02-16 21:16:43 -08001331 int (*ndo_setup_tc)(struct net_device *dev,
Jiri Pirko2572ac52017-08-07 10:15:17 +02001332 enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02001333 void *type_data);
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001334#if IS_ENABLED(CONFIG_FCOE)
Yi Zoucb454392009-08-31 12:31:36 +00001335 int (*ndo_fcoe_enable)(struct net_device *dev);
1336 int (*ndo_fcoe_disable)(struct net_device *dev);
Yi Zou4d288d52009-02-27 14:06:59 -08001337 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1338 u16 xid,
1339 struct scatterlist *sgl,
1340 unsigned int sgc);
1341 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1342 u16 xid);
Yi Zou6247e082011-02-01 07:22:06 +00001343 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1344 u16 xid,
1345 struct scatterlist *sgl,
1346 unsigned int sgc);
Neerav Parikh68bad942012-01-04 20:23:39 +00001347 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1348 struct netdev_fcoe_hbainfo *hbainfo);
Bhanu Prakash Gollapudi3c9c36bc2011-08-26 09:45:41 +00001349#endif
1350
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001351#if IS_ENABLED(CONFIG_LIBFCOE)
Yi Zoudf5c7942009-10-28 18:24:35 +00001352#define NETDEV_FCOE_WWNN 0
1353#define NETDEV_FCOE_WWPN 1
1354 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1355 u64 *wwn, int type);
Yi Zou4d288d52009-02-27 14:06:59 -08001356#endif
Bhanu Prakash Gollapudi3c9c36bc2011-08-26 09:45:41 +00001357
Ben Hutchingsc4454772011-01-19 11:03:53 +00001358#ifdef CONFIG_RFS_ACCEL
1359 int (*ndo_rx_flow_steer)(struct net_device *dev,
1360 const struct sk_buff *skb,
1361 u16 rxq_index,
1362 u32 flow_id);
1363#endif
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001364 int (*ndo_add_slave)(struct net_device *dev,
David Ahern33eaf2a2017-10-04 17:48:46 -07001365 struct net_device *slave_dev,
1366 struct netlink_ext_ack *extack);
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001367 int (*ndo_del_slave)(struct net_device *dev,
1368 struct net_device *slave_dev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001369 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1370 netdev_features_t features);
Michał Mirosław5455c692011-02-15 16:59:17 +00001371 int (*ndo_set_features)(struct net_device *dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001372 netdev_features_t features);
Jiri Pirko503eebc2016-07-05 11:27:37 +02001373 int (*ndo_neigh_construct)(struct net_device *dev,
1374 struct neighbour *n);
1375 void (*ndo_neigh_destroy)(struct net_device *dev,
1376 struct neighbour *n);
John Fastabend77162022012-04-15 06:43:56 +00001377
1378 int (*ndo_fdb_add)(struct ndmsg *ndm,
stephen hemmingeredc7d572012-10-01 12:32:33 +00001379 struct nlattr *tb[],
John Fastabend77162022012-04-15 06:43:56 +00001380 struct net_device *dev,
stephen hemminger6b6e2722012-09-17 10:03:26 +00001381 const unsigned char *addr,
Jiri Pirkof6f64242014-11-28 14:34:15 +01001382 u16 vid,
John Fastabend77162022012-04-15 06:43:56 +00001383 u16 flags);
1384 int (*ndo_fdb_del)(struct ndmsg *ndm,
Vlad Yasevich1690be62013-02-13 12:00:18 +00001385 struct nlattr *tb[],
John Fastabend77162022012-04-15 06:43:56 +00001386 struct net_device *dev,
Jiri Pirkof6f64242014-11-28 14:34:15 +01001387 const unsigned char *addr,
1388 u16 vid);
John Fastabend77162022012-04-15 06:43:56 +00001389 int (*ndo_fdb_dump)(struct sk_buff *skb,
1390 struct netlink_callback *cb,
1391 struct net_device *dev,
Jamal Hadi Salim5d5eacb2014-07-10 07:01:58 -04001392 struct net_device *filter_dev,
Roopa Prabhud2976532016-08-30 21:56:45 -07001393 int *idx);
John Fastabende5a55a82012-10-24 08:12:57 +00001394
1395 int (*ndo_bridge_setlink)(struct net_device *dev,
Roopa Prabhuadd511b2015-01-29 22:40:12 -08001396 struct nlmsghdr *nlh,
1397 u16 flags);
John Fastabende5a55a82012-10-24 08:12:57 +00001398 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1399 u32 pid, u32 seq,
Vlad Yasevich6cbdcee2013-02-13 12:00:13 +00001400 struct net_device *dev,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02001401 u32 filter_mask,
1402 int nlflags);
Vlad Yasevich407af322013-02-13 12:00:12 +00001403 int (*ndo_bridge_dellink)(struct net_device *dev,
Roopa Prabhuadd511b2015-01-29 22:40:12 -08001404 struct nlmsghdr *nlh,
1405 u16 flags);
Jiri Pirko4bf84c32012-12-27 23:49:37 +00001406 int (*ndo_change_carrier)(struct net_device *dev,
1407 bool new_carrier);
Jiri Pirko66b52b02013-07-29 18:16:49 +02001408 int (*ndo_get_phys_port_id)(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01001409 struct netdev_phys_item_id *ppid);
David Aherndb24a902015-03-17 20:23:15 -06001410 int (*ndo_get_phys_port_name)(struct net_device *dev,
1411 char *name, size_t len);
Alexander Duyck7c46a642016-06-16 12:21:00 -07001412 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1413 struct udp_tunnel_info *ti);
1414 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1415 struct udp_tunnel_info *ti);
John Fastabenda6cc0cf2013-11-06 09:54:46 -08001416 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1417 struct net_device *dev);
1418 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1419 void *priv);
1420
Vlad Yasevich25175ba2014-05-16 17:04:54 -04001421 int (*ndo_get_lock_subclass)(struct net_device *dev);
John Fastabend822b3b22015-03-18 14:57:33 +02001422 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1423 int queue_index,
1424 u32 maxrate);
Nicolas Dichtela54acb32015-04-02 17:07:00 +02001425 int (*ndo_get_iflink)(const struct net_device *dev);
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07001426 int (*ndo_change_proto_down)(struct net_device *dev,
1427 bool proto_down);
Pravin B Shelarfc4099f2015-10-22 18:17:16 -07001428 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1429 struct sk_buff *skb);
Paolo Abeni871b6422016-02-26 10:45:37 +01001430 void (*ndo_set_rx_headroom)(struct net_device *dev,
1431 int needed_headroom);
Jakub Kicinskif4e63522017-11-03 13:56:16 -07001432 int (*ndo_bpf)(struct net_device *dev,
1433 struct netdev_bpf *bpf);
Jesper Dangaard Brouer735fc402018-05-24 16:46:12 +02001434 int (*ndo_xdp_xmit)(struct net_device *dev, int n,
Jesper Dangaard Brouer42b33462018-05-31 10:59:47 +02001435 struct xdp_frame **xdp,
1436 u32 flags);
Magnus Karlssone3760c72018-06-04 14:05:56 +02001437 int (*ndo_xsk_async_xmit)(struct net_device *dev,
1438 u32 queue_id);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001439};
1440
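/*
 * Editor's illustration (not part of the original header): a minimal sketch
 * of how a driver might satisfy rule 1 of the statistics contract documented
 * above, i.e. implement @ndo_get_stats64 to fill in the zero-initialised
 * rtnl_link_stats64 passed by the caller. All "foo" names and the per-device
 * counters are hypothetical.
 */
#if 0	/* illustrative sketch only, not compiled */
struct foo_priv {
	u64 rx_packets;		/* hypothetical software counters */
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* storage arrives zero-initialised; fill in only what we track */
	storage->rx_packets = priv->rx_packets;
	storage->rx_bytes = priv->rx_bytes;
	storage->tx_packets = priv->tx_packets;
	storage->tx_bytes = priv->tx_bytes;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	 = foo_open,		/* hypothetical */
	.ndo_stop	 = foo_stop,		/* hypothetical */
	.ndo_start_xmit	 = foo_start_xmit,	/* hypothetical */
	.ndo_get_stats64 = foo_get_stats64,
};
#endif
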
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001441/**
1442 * enum net_device_priv_flags - &struct net_device priv_flags
1443 *
 1444 * These are the &struct net_device priv_flags; they are only set internally
1445 * by drivers and used in the kernel. These flags are invisible to
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001446 * userspace; this means that the order of these flags can change
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001447 * during any kernel release.
1448 *
1449 * You should have a pretty good reason to be extending these flags.
1450 *
1451 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1452 * @IFF_EBRIDGE: Ethernet bridging device
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001453 * @IFF_BONDING: bonding master or slave
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001454 * @IFF_ISATAP: ISATAP interface (RFC4214)
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001455 * @IFF_WAN_HDLC: WAN HDLC device
1456 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1457 * release skb->dst
1458 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1459 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1460 * @IFF_MACVLAN_PORT: device used as macvlan port
1461 * @IFF_BRIDGE_PORT: device used as bridge port
1462 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1463 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1464 * @IFF_UNICAST_FLT: Supports unicast filtering
1465 * @IFF_TEAM_PORT: device used as team port
1466 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1467 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1468 * change when it's running
1469 * @IFF_MACVLAN: Macvlan device
Luis de Bethencourt6d0e24c2016-03-21 20:58:28 +00001470 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1471 * underlying stacked devices
David Ahern007979e2015-09-29 20:07:10 -07001472 * @IFF_L3MDEV_MASTER: device is an L3 master device
Phil Sutterfa8187c2015-08-13 19:01:06 +02001473 * @IFF_NO_QUEUE: device can run without qdisc attached
Jiri Pirko35d4e172015-08-27 09:31:20 +02001474 * @IFF_OPENVSWITCH: device is an Open vSwitch master
David Ahernfee6d4c2015-10-05 08:51:24 -07001475 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
Jiri Pirkoc981e422015-12-03 12:12:06 +01001476 * @IFF_TEAM: device is a team device
Keller, Jacob Ed4ab4282016-02-08 16:05:03 -08001477 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
Paolo Abeni871b6422016-02-26 10:45:37 +01001478 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1479 * entity (i.e. the master device for bridged veth)
Sabrina Dubroca3c175782016-03-11 18:07:32 +01001480 * @IFF_MACSEC: device is a MACsec device
Paolo Abenif54262502018-03-09 10:39:24 +01001481 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
Sridhar Samudrala30c8bd52018-05-24 09:55:13 -07001482 * @IFF_FAILOVER: device is a failover master device
1483 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001484 */
1485enum netdev_priv_flags {
1486 IFF_802_1Q_VLAN = 1<<0,
1487 IFF_EBRIDGE = 1<<1,
Jiri Pirko0dc15492015-08-27 09:31:21 +02001488 IFF_BONDING = 1<<2,
1489 IFF_ISATAP = 1<<3,
1490 IFF_WAN_HDLC = 1<<4,
1491 IFF_XMIT_DST_RELEASE = 1<<5,
1492 IFF_DONT_BRIDGE = 1<<6,
1493 IFF_DISABLE_NETPOLL = 1<<7,
1494 IFF_MACVLAN_PORT = 1<<8,
1495 IFF_BRIDGE_PORT = 1<<9,
1496 IFF_OVS_DATAPATH = 1<<10,
1497 IFF_TX_SKB_SHARING = 1<<11,
1498 IFF_UNICAST_FLT = 1<<12,
1499 IFF_TEAM_PORT = 1<<13,
1500 IFF_SUPP_NOFCS = 1<<14,
1501 IFF_LIVE_ADDR_CHANGE = 1<<15,
1502 IFF_MACVLAN = 1<<16,
1503 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
Paolo Abeni1ec54cb2018-03-06 10:56:31 +01001504 IFF_L3MDEV_MASTER = 1<<18,
1505 IFF_NO_QUEUE = 1<<19,
1506 IFF_OPENVSWITCH = 1<<20,
1507 IFF_L3MDEV_SLAVE = 1<<21,
1508 IFF_TEAM = 1<<22,
1509 IFF_RXFH_CONFIGURED = 1<<23,
1510 IFF_PHONY_HEADROOM = 1<<24,
1511 IFF_MACSEC = 1<<25,
Paolo Abenif54262502018-03-09 10:39:24 +01001512 IFF_NO_RX_HANDLER = 1<<26,
Sridhar Samudrala30c8bd52018-05-24 09:55:13 -07001513 IFF_FAILOVER = 1<<27,
1514 IFF_FAILOVER_SLAVE = 1<<28,
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001515};
1516
1517#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1518#define IFF_EBRIDGE IFF_EBRIDGE
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001519#define IFF_BONDING IFF_BONDING
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001520#define IFF_ISATAP IFF_ISATAP
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001521#define IFF_WAN_HDLC IFF_WAN_HDLC
1522#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1523#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1524#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1525#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1526#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1527#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1528#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1529#define IFF_UNICAST_FLT IFF_UNICAST_FLT
1530#define IFF_TEAM_PORT IFF_TEAM_PORT
1531#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1532#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1533#define IFF_MACVLAN IFF_MACVLAN
Eric Dumazet02875872014-10-05 18:38:35 -07001534#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
David Ahern007979e2015-09-29 20:07:10 -07001535#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
Phil Sutterfa8187c2015-08-13 19:01:06 +02001536#define IFF_NO_QUEUE IFF_NO_QUEUE
Jiri Pirko35d4e172015-08-27 09:31:20 +02001537#define IFF_OPENVSWITCH IFF_OPENVSWITCH
Jiri Pirko8f253482015-11-04 14:59:06 +01001538#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
Jiri Pirkoc981e422015-12-03 12:12:06 +01001539#define IFF_TEAM IFF_TEAM
Keller, Jacob Ed4ab4282016-02-08 16:05:03 -08001540#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
Sabrina Dubroca3c175782016-03-11 18:07:32 +01001541#define IFF_MACSEC IFF_MACSEC
Paolo Abenif54262502018-03-09 10:39:24 +01001542#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
Sridhar Samudrala30c8bd52018-05-24 09:55:13 -07001543#define IFF_FAILOVER IFF_FAILOVER
1544#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001545
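/*
 * Editor's illustration (not part of the original header): priv_flags are
 * queried with plain bitwise tests. A hedged sketch of such a predicate; the
 * helper name here is made up and is not an API declared by this header.
 */
#if 0	/* illustrative sketch only, not compiled */
static inline bool foo_is_failover_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER_SLAVE;
}
#endif
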
Karoly Kemeny536721b2014-07-30 20:27:36 +02001546/**
1547 * struct net_device - The DEVICE structure.
Mauro Carvalho Chehabd6519832017-05-12 09:35:46 -03001548 *
1549 * Actually, this whole structure is a big mistake. It mixes I/O
1550 * data with strictly "high-level" data, and it has to know about
1551 * almost every data structure used in the INET module.
Karoly Kemeny536721b2014-07-30 20:27:36 +02001552 *
1553 * @name: This is the first field of the "visible" part of this structure
1554 * (i.e. as seen by users in the "Space.c" file). It is the name
Mauro Carvalho Chehabd6519832017-05-12 09:35:46 -03001555 * of the interface.
Karoly Kemeny536721b2014-07-30 20:27:36 +02001556 *
1557 * @name_hlist: Device name hash chain, please keep it close to name[]
1558 * @ifalias: SNMP alias
1559 * @mem_end: Shared memory end
1560 * @mem_start: Shared memory start
1561 * @base_addr: Device I/O address
1562 * @irq: Device IRQ number
1563 *
1564 * @state: Generic network queuing layer state, see netdev_state_t
1565 * @dev_list: The global list of network devices
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001566 * @napi_list: List entry used for polling NAPI devices
1567 * @unreg_list: List entry when we are unregistering the
1568 * device; see the function unregister_netdev
1569 * @close_list: List entry used when we are closing the device
Benjamin Poirier62d885fe2016-03-21 14:08:28 -07001570 * @ptype_all: Device-specific packet handlers for all protocols
1571 * @ptype_specific: Device-specific, protocol-specific packet handlers
Karoly Kemeny536721b2014-07-30 20:27:36 +02001572 *
1573 * @adj_list: Directly linked devices, like slaves for bonding
Karoly Kemeny536721b2014-07-30 20:27:36 +02001574 * @features: Currently active device features
1575 * @hw_features: User-changeable features
1576 *
1577 * @wanted_features: User-requested features
1578 * @vlan_features: Mask of features inheritable by VLAN devices
1579 *
1580 * @hw_enc_features: Mask of features inherited by encapsulating devices
1581 * This field indicates what encapsulation
1582 * offloads the hardware is capable of doing,
1583 * and drivers will need to set them appropriately.
1584 *
1585 * @mpls_features: Mask of features inheritable by MPLS
1586 *
1587 * @ifindex: interface index
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001588 * @group: The group the device belongs to
Karoly Kemeny536721b2014-07-30 20:27:36 +02001589 *
1590 * @stats: Statistics struct, which was left as a legacy, use
1591 * rtnl_link_stats64 instead
1592 *
1593 * @rx_dropped: Dropped packets by core network,
1594 * do not use this in drivers
1595 * @tx_dropped: Dropped packets by core network,
1596 * do not use this in drivers
Jarod Wilson6e7333d2016-02-01 18:51:05 -05001597 * @rx_nohandler: nohandler dropped packets by core network on
1598 * inactive devices, do not use this in drivers
Florian Fainelli9e55e5d2018-01-22 19:14:25 -08001599 * @carrier_up_count: Number of times the carrier has been up
1600 * @carrier_down_count: Number of times the carrier has been down
Karoly Kemeny536721b2014-07-30 20:27:36 +02001601 *
Karoly Kemeny536721b2014-07-30 20:27:36 +02001602 * @wireless_handlers: List of functions to handle Wireless Extensions,
1603 * instead of ioctl,
1604 * see <net/iw_handler.h> for details.
1605 * @wireless_data: Instance data managed by the core of wireless extensions
1606 *
1607 * @netdev_ops: Includes several pointers to callbacks,
1608 * if one wants to override the ndo_*() functions
1609 * @ethtool_ops: Management operations
Alexander Aringf997c552016-06-15 21:20:23 +02001610 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1611 * discovery handling. Necessary for e.g. 6LoWPAN.
Eric W. Biedermand4760592015-03-02 00:11:09 -06001612 * @header_ops: Includes callbacks for creating, parsing, caching, etc.
Karoly Kemeny536721b2014-07-30 20:27:36 +02001613 * of Layer 2 headers.
1614 *
1615 * @flags: Interface flags (a la BSD)
1616 * @priv_flags: Like 'flags' but invisible to userspace,
1617 * see if.h for the definitions
 1618 * @gflags: Global flags (kept as legacy)
1619 * @padded: How much padding added by alloc_netdev()
1620 * @operstate: RFC2863 operstate
1621 * @link_mode: Mapping policy to operstate
1622 * @if_port: Selectable AUI, TP, ...
1623 * @dma: DMA channel
1624 * @mtu: Interface MTU value
Jarod Wilson61e84622016-10-07 22:04:33 -04001625 * @min_mtu: Interface Minimum MTU value
1626 * @max_mtu: Interface Maximum MTU value
Karoly Kemeny536721b2014-07-30 20:27:36 +02001627 * @type: Interface hardware type
Willem de Bruijn2793a232016-03-09 21:58:32 -05001628 * @hard_header_len: Maximum hardware header length.
Willem de Bruijn217e6fa2017-02-07 15:57:20 -05001629 * @min_header_len: Minimum hardware header length
Karoly Kemeny536721b2014-07-30 20:27:36 +02001630 *
1631 * @needed_headroom: Extra headroom the hardware may need, but not in all
1632 * cases can this be guaranteed
1633 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
1634 * cases can this be guaranteed. Some cases also use
1635 * LL_MAX_HEADER instead to allocate the skb
1636 *
1637 * interface address info:
1638 *
1639 * @perm_addr: Permanent hw address
1640 * @addr_assign_type: Hw address assignment type
1641 * @addr_len: Hardware address length
Alexander Aring8626a0c2016-06-15 21:20:16 +02001642 * @neigh_priv_len: Used in neigh_alloc()
Karoly Kemeny536721b2014-07-30 20:27:36 +02001643 * @dev_id: Used to differentiate devices that share
1644 * the same link layer address
1645 * @dev_port: Used to differentiate devices that share
1646 * the same function
1647 * @addr_list_lock: XXX: need comments on this one
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001648 * @uc_promisc: Counter that indicates promiscuous mode
Karoly Kemeny536721b2014-07-30 20:27:36 +02001649 * has been enabled due to the need to listen to
1650 * additional unicast addresses in a device that
1651 * does not implement ndo_set_rx_mode()
Thomas Graf14ffbbb2015-04-10 15:52:38 +02001652 * @uc: unicast mac addresses
1653 * @mc: multicast mac addresses
1654 * @dev_addrs: list of device hw addresses
1655 * @queues_kset: Group of all Kobjects in the Tx and RX queues
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001656 * @promiscuity: Number of times the NIC is told to work in
1657 * promiscuous mode; if it becomes 0 the NIC will
1658 * exit promiscuous mode
Karoly Kemeny536721b2014-07-30 20:27:36 +02001659 * @allmulti: Counter, enables or disables allmulticast mode
1660 *
1661 * @vlan_info: VLAN info
1662 * @dsa_ptr: dsa specific data
1663 * @tipc_ptr: TIPC specific data
1664 * @atalk_ptr: AppleTalk link
1665 * @ip_ptr: IPv4 specific data
1666 * @dn_ptr: DECnet specific data
1667 * @ip6_ptr: IPv6 specific data
1668 * @ax25_ptr: AX.25 specific data
1669 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1670 *
Karoly Kemeny536721b2014-07-30 20:27:36 +02001671 * @dev_addr: Hw address (before bcast,
1672 * because most packets are unicast)
1673 *
1674 * @_rx: Array of RX queues
1675 * @num_rx_queues: Number of RX queues
1676 * allocated at register_netdev() time
1677 * @real_num_rx_queues: Number of RX queues currently active in device
1678 *
1679 * @rx_handler: handler for received packets
1680 * @rx_handler_data: XXX: need comments on this one
Jiri Pirko46209402017-11-03 11:46:25 +01001681 * @miniq_ingress: ingress/clsact qdisc specific data for
1682 * ingress processing
Karoly Kemeny536721b2014-07-30 20:27:36 +02001683 * @ingress_queue: XXX: need comments on this one
1684 * @broadcast: hw bcast address
1685 *
Thomas Graf14ffbbb2015-04-10 15:52:38 +02001686 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1687 * indexed by RX queue number. Assigned by driver.
1688 * This must only be set if the ndo_rx_flow_steer
1689 * operation is defined
1690 * @index_hlist: Device index hash chain
1691 *
Karoly Kemeny536721b2014-07-30 20:27:36 +02001692 * @_tx: Array of TX queues
1693 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1694 * @real_num_tx_queues: Number of TX queues currently active in device
1695 * @qdisc: Root qdisc from userspace point of view
1696 * @tx_queue_len: Max frames per queue allowed
1697 * @tx_global_lock: XXX: need comments on this one
1698 *
1699 * @xps_maps: XXX: need comments on this one
Jiri Pirko46209402017-11-03 11:46:25 +01001700 * @miniq_egress: clsact qdisc specific data for
1701 * egress processing
Karoly Kemeny536721b2014-07-30 20:27:36 +02001702 * @watchdog_timeo: Represents the timeout that is used by
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001703 * the watchdog (see dev_watchdog())
Karoly Kemeny536721b2014-07-30 20:27:36 +02001704 * @watchdog_timer: List of timers
1705 *
1706 * @pcpu_refcnt: Number of references to this device
1707 * @todo_list: Delayed register/unregister
Karoly Kemeny536721b2014-07-30 20:27:36 +02001708 * @link_watch_list: XXX: need comments on this one
1709 *
1710 * @reg_state: Register/unregister state machine
1711 * @dismantle: Device is going to be freed
1712 * @rtnl_link_state: This enum represents the phases of creating
1713 * a new link
1714 *
David S. Millercf124db2017-05-08 12:52:56 -04001715 * @needs_free_netdev: Should unregister perform free_netdev?
1716 * @priv_destructor: Called from unregister
Karoly Kemeny536721b2014-07-30 20:27:36 +02001717 * @npinfo: XXX: need comments on this one
1718 * @nd_net: Network namespace this network device is inside
1719 *
1720 * @ml_priv: Mid-layer private
1721 * @lstats: Loopback statistics
1722 * @tstats: Tunnel statistics
1723 * @dstats: Dummy statistics
1724 * @vstats: Virtual ethernet statistics
1725 *
1726 * @garp_port: GARP
1727 * @mrp_port: MRP
1728 *
1729 * @dev: Class/net/name entry
1730 * @sysfs_groups: Space for optional device, statistics and wireless
1731 * sysfs groups
1732 *
1733 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1734 * @rtnl_link_ops: Rtnl_link_ops
1735 *
1736 * @gso_max_size: Maximum size of generic segmentation offload
1737 * @gso_max_segs: Maximum number of segments that can be passed to the
1738 * NIC for GSO
1739 *
1740 * @dcbnl_ops: Data Center Bridging netlink ops
1741 * @num_tc: Number of traffic classes in the net device
1742 * @tc_to_txq: XXX: need comments on this one
Randy Dunlap920c1cd2016-11-21 18:28:36 -08001743 * @prio_tc_map: XXX: need comments on this one
Karoly Kemeny536721b2014-07-30 20:27:36 +02001744 *
1745 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1746 *
1747 * @priomap: XXX: need comments on this one
1748 * @phydev: Physical device may attach itself
1749 * for hardware timestamping
Russell Kinge679c9c2018-03-28 15:44:16 -07001750 * @sfp_bus: attached &struct sfp_bus structure.
Karoly Kemeny536721b2014-07-30 20:27:36 +02001751 *
Eric Dumazet123b3652016-06-08 07:22:49 -07001752 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1753 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
Karoly Kemeny536721b2014-07-30 20:27:36 +02001754 *
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07001755 * @proto_down: protocol port state information can be sent to the
1756 * switch driver and used to set the phys state of the
1757 * switch port.
1758 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 * FIXME: cleanup struct net_device such that network protocol info
1760 * moves out.
1761 */
1762
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001763struct net_device {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 char name[IFNAMSIZ];
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001765 struct hlist_node name_hlist;
Florian Westphal6c557002017-10-02 23:50:05 +02001766 struct dev_ifalias __rcu *ifalias;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 /*
1768 * I/O specific fields
1769 * FIXME: Merge these and struct ifmap into one
1770 */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001771 unsigned long mem_end;
1772 unsigned long mem_start;
1773 unsigned long base_addr;
1774 int irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775
1776 /*
Karoly Kemeny536721b2014-07-30 20:27:36 +02001777 * Some hardware also needs these fields (state,dev_list,
1778 * napi_list,unreg_list,close_list) but they are not
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 * part of the usual set specified in Space.c.
1780 */
1781
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 unsigned long state;
1783
Pavel Emelianov7562f872007-05-03 15:13:45 -07001784 struct list_head dev_list;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001785 struct list_head napi_list;
Eric Dumazet44a08732009-10-27 07:03:04 +00001786 struct list_head unreg_list;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001787 struct list_head close_list;
Salam Noureddine7866a622015-01-27 11:35:48 -08001788 struct list_head ptype_all;
1789 struct list_head ptype_specific;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02001790
Veaceslav Falico2f268f12013-09-25 09:20:07 +02001791 struct {
1792 struct list_head upper;
1793 struct list_head lower;
1794 } adj_list;
1795
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001796 netdev_features_t features;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001797 netdev_features_t hw_features;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001798 netdev_features_t wanted_features;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001799 netdev_features_t vlan_features;
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00001800 netdev_features_t hw_enc_features;
Simon Horman0d89d202013-05-23 21:02:52 +00001801 netdev_features_t mpls_features;
Alexander Duyck802ab552016-04-10 21:45:03 -04001802 netdev_features_t gso_partial_features;
Michał Mirosław04ed3e72011-01-24 15:32:47 -08001803
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 int ifindex;
Nicolas Dichtel7a66bbc2015-04-02 17:07:09 +02001805 int group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806
Rusty Russellc45d2862007-03-28 14:29:08 -07001807 struct net_device_stats stats;
Eric Dumazet015f0682014-03-27 08:45:56 -07001808
Eric Dumazet015f0682014-03-27 08:45:56 -07001809 atomic_long_t rx_dropped;
1810 atomic_long_t tx_dropped;
Jarod Wilson6e7333d2016-02-01 18:51:05 -05001811 atomic_long_t rx_nohandler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812
David Decotignyb2d3bcf2018-01-18 09:59:13 -08001813 /* Stats to monitor link on/off, flapping */
1814 atomic_t carrier_up_count;
1815 atomic_t carrier_down_count;
1816
Johannes Bergb86e0282007-04-26 20:48:23 -07001817#ifdef CONFIG_WIRELESS_EXT
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001818 const struct iw_handler_def *wireless_handlers;
1819 struct iw_public_data *wireless_data;
Johannes Bergb86e0282007-04-26 20:48:23 -07001820#endif
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001821 const struct net_device_ops *netdev_ops;
Stephen Hemminger76fd8592006-09-08 11:16:13 -07001822 const struct ethtool_ops *ethtool_ops;
Scott Feldman41706042015-03-15 21:07:14 -07001823#ifdef CONFIG_NET_SWITCHDEV
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07001824 const struct switchdev_ops *switchdev_ops;
Scott Feldman41706042015-03-15 21:07:14 -07001825#endif
David Ahern1b69c6d2015-09-29 20:07:11 -07001826#ifdef CONFIG_NET_L3_MASTER_DEV
1827 const struct l3mdev_ops *l3mdev_ops;
1828#endif
Alexander Aringf997c552016-06-15 21:20:23 +02001829#if IS_ENABLED(CONFIG_IPV6)
1830 const struct ndisc_ops *ndisc_ops;
1831#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832
Shannon Nelson9cb0d212017-12-19 15:35:49 -08001833#ifdef CONFIG_XFRM_OFFLOAD
Steffen Klassertd77e38e2017-04-14 10:06:10 +02001834 const struct xfrmdev_ops *xfrmdev_ops;
1835#endif
1836
Ilya Lesokhina5c37c62018-04-30 10:16:13 +03001837#if IS_ENABLED(CONFIG_TLS_DEVICE)
1838 const struct tlsdev_ops *tlsdev_ops;
1839#endif
1840
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001841 const struct header_ops *header_ops;
1842
Karoly Kemeny536721b2014-07-30 20:27:36 +02001843 unsigned int flags;
1844 unsigned int priv_flags;
1845
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 unsigned short gflags;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001847 unsigned short padded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
Karoly Kemeny536721b2014-07-30 20:27:36 +02001849 unsigned char operstate;
1850 unsigned char link_mode;
Stefan Rompfb00055a2006-03-20 17:09:11 -08001851
Karoly Kemeny536721b2014-07-30 20:27:36 +02001852 unsigned char if_port;
1853 unsigned char dma;
Joe Perchesbdc220d2011-05-09 17:42:46 +00001854
Karoly Kemeny536721b2014-07-30 20:27:36 +02001855 unsigned int mtu;
Jarod Wilson61e84622016-10-07 22:04:33 -04001856 unsigned int min_mtu;
1857 unsigned int max_mtu;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001858 unsigned short type;
1859 unsigned short hard_header_len;
Alexey Dobriyand92be7a2017-04-10 11:25:26 +03001860 unsigned char min_header_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861
Johannes Bergf5184d22008-05-12 20:48:31 -07001862 unsigned short needed_headroom;
1863 unsigned short needed_tailroom;
1864
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 /* Interface address info. */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001866 unsigned char perm_addr[MAX_ADDR_LEN];
1867 unsigned char addr_assign_type;
1868 unsigned char addr_len;
Sebastian Siewiora0a96632013-12-12 10:15:59 +01001869 unsigned short neigh_priv_len;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001870 unsigned short dev_id;
1871 unsigned short dev_port;
Jiri Pirkoccffad252009-05-22 23:22:17 +00001872 spinlock_t addr_list_lock;
Thomas Graf14ffbbb2015-04-10 15:52:38 +02001873 unsigned char name_assign_type;
1874 bool uc_promisc;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001875 struct netdev_hw_addr_list uc;
1876 struct netdev_hw_addr_list mc;
1877 struct netdev_hw_addr_list dev_addrs;
1878
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001879#ifdef CONFIG_SYSFS
1880 struct kset *queues_kset;
1881#endif
Wang Chen9d45abe2008-06-17 21:12:48 -07001882 unsigned int promiscuity;
1883 unsigned int allmulti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001886 /* Protocol-specific pointers */
Jesse Gross65ac6a52010-10-20 13:56:05 +00001887
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001888#if IS_ENABLED(CONFIG_VLAN_8021Q)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001889 struct vlan_info __rcu *vlan_info;
Jesse Gross65ac6a52010-10-20 13:56:05 +00001890#endif
Ben Hutchings34a430d2011-11-25 14:38:38 +00001891#if IS_ENABLED(CONFIG_NET_DSA)
Vivien Didelot2f657a62017-09-29 17:19:20 -04001892 struct dsa_port *dsa_ptr;
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00001893#endif
Ying Xue37cb0622013-12-10 20:45:41 -08001894#if IS_ENABLED(CONFIG_TIPC)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001895 struct tipc_bearer __rcu *tipc_ptr;
Ying Xue37cb0622013-12-10 20:45:41 -08001896#endif
David Ahern89e58142018-02-13 08:52:02 -08001897#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001898 void *atalk_ptr;
David Ahern89e58142018-02-13 08:52:02 -08001899#endif
Karoly Kemeny536721b2014-07-30 20:27:36 +02001900 struct in_device __rcu *ip_ptr;
David Ahern330c7272018-02-13 08:52:00 -08001901#if IS_ENABLED(CONFIG_DECNET)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001902 struct dn_dev __rcu *dn_ptr;
David Ahern330c7272018-02-13 08:52:00 -08001903#endif
Karoly Kemeny536721b2014-07-30 20:27:36 +02001904 struct inet6_dev __rcu *ip6_ptr;
David Ahern19ff13f2018-02-13 08:52:01 -08001905#if IS_ENABLED(CONFIG_AX25)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001906 void *ax25_ptr;
David Ahern19ff13f2018-02-13 08:52:01 -08001907#endif
Karoly Kemeny536721b2014-07-30 20:27:36 +02001908 struct wireless_dev *ieee80211_ptr;
Alexander Aring98a18b62014-11-02 06:44:54 +01001909 struct wpan_dev *ieee802154_ptr;
Robert Shearman03c57742015-04-22 11:14:37 +01001910#if IS_ENABLED(CONFIG_MPLS_ROUTING)
1911 struct mpls_dev __rcu *mpls_ptr;
1912#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001914/*
Eric Dumazetcd135392010-09-16 02:58:13 +00001915 * Cache lines mostly used on receive path (including eth_type_trans())
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001916 */
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001917 /* Interface address info used in eth_type_trans() */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001918 unsigned char *dev_addr;
Jiri Pirkof001fde2009-05-05 02:48:28 +00001919
Tom Herbert0a9627f2010-03-16 08:03:29 +00001920 struct netdev_rx_queue *_rx;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001921 unsigned int num_rx_queues;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001922 unsigned int real_num_rx_queues;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001923
Eric Dumazet7acedaf2017-04-25 11:36:52 -07001924 struct bpf_prog __rcu *xdp_prog;
Eric Dumazet3b47d302014-11-06 21:09:44 -08001925 unsigned long gro_flush_timeout;
stephen hemminger61391cd2010-11-15 06:38:12 +00001926 rx_handler_func_t __rcu *rx_handler;
1927 void __rcu *rx_handler_data;
David S. Millere8a04642008-07-17 00:34:19 -07001928
Daniel Borkmann4cda01e2015-05-11 19:28:49 +02001929#ifdef CONFIG_NET_CLS_ACT
Jiri Pirko46209402017-11-03 11:46:25 +01001930 struct mini_Qdisc __rcu *miniq_ingress;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02001931#endif
Eric Dumazet24824a02010-10-02 06:11:55 +00001932 struct netdev_queue __rcu *ingress_queue;
Pablo Neirae687ad62015-05-13 18:19:38 +02001933#ifdef CONFIG_NETFILTER_INGRESS
Aaron Conole960632e2017-08-24 00:08:32 +02001934 struct nf_hook_entries __rcu *nf_hooks_ingress;
Pablo Neirae687ad62015-05-13 18:19:38 +02001935#endif
Daniel Borkmannd2788d32015-05-09 22:51:32 +02001936
Karoly Kemeny536721b2014-07-30 20:27:36 +02001937 unsigned char broadcast[MAX_ADDR_LEN];
Thomas Graf14ffbbb2015-04-10 15:52:38 +02001938#ifdef CONFIG_RFS_ACCEL
1939 struct cpu_rmap *rx_cpu_rmap;
1940#endif
1941 struct hlist_node index_hlist;
Eric Dumazetcd135392010-09-16 02:58:13 +00001942
1943/*
1944 * Cache lines mostly used on transmit path
1945 */
David S. Millere8a04642008-07-17 00:34:19 -07001946 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1947 unsigned int num_tx_queues;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001948 unsigned int real_num_tx_queues;
Patrick McHardyaf356af2009-09-04 06:41:18 +00001949 struct Qdisc *qdisc;
Jiri Kosina59cc1f62016-08-10 11:05:15 +02001950#ifdef CONFIG_NET_SCHED
1951 DECLARE_HASHTABLE (qdisc_hash, 4);
1952#endif
Alexey Dobriyan0cd29502017-05-17 13:30:44 +03001953 unsigned int tx_queue_len;
David S. Millerc3f26a22008-07-31 16:58:50 -07001954 spinlock_t tx_global_lock;
Thomas Graf14ffbbb2015-04-10 15:52:38 +02001955 int watchdog_timeo;
Eric Dumazetcd135392010-09-16 02:58:13 +00001956
Tom Herbertbf264142010-11-26 08:36:09 +00001957#ifdef CONFIG_XPS
Amritha Nambiar80d19662018-06-29 21:26:41 -07001958 struct xps_dev_maps __rcu *xps_cpus_map;
1959 struct xps_dev_maps __rcu *xps_rxqs_map;
Tom Herbertbf264142010-11-26 08:36:09 +00001960#endif
Daniel Borkmann1f211a12016-01-07 22:29:47 +01001961#ifdef CONFIG_NET_CLS_ACT
Jiri Pirko46209402017-11-03 11:46:25 +01001962 struct mini_Qdisc __rcu *miniq_egress;
Daniel Borkmann1f211a12016-01-07 22:29:47 +01001963#endif
Scott Feldman0c4f6912015-07-18 18:24:48 -07001964
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001965 /* These may be needed for future network-power-down code. */
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001966 struct timer_list watchdog_timer;
1967
Eric Dumazet29b44332010-10-11 10:22:12 +00001968 int __percpu *pcpu_refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 struct list_head todo_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
Eric Dumazete014deb2009-11-17 05:59:21 +00001971 struct list_head link_watch_list;
Herbert Xu572a1032007-05-08 18:34:17 -07001972
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 enum { NETREG_UNINITIALIZED=0,
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07001974 NETREG_REGISTERED, /* completed register_netdevice */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 NETREG_UNREGISTERING, /* called unregister_netdevice */
1976 NETREG_UNREGISTERED, /* completed unregister todo */
1977 NETREG_RELEASED, /* called free_netdev */
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001978 NETREG_DUMMY, /* dummy device for NAPI poll */
Eric Dumazet449f4542011-05-19 12:24:16 +00001979 } reg_state:8;
1980
Karoly Kemeny536721b2014-07-30 20:27:36 +02001981 bool dismantle;
Patrick McHardya2835762010-02-26 06:34:51 +00001982
1983 enum {
1984 RTNL_LINK_INITIALIZED,
1985 RTNL_LINK_INITIALIZING,
1986 } rtnl_link_state:16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987
David S. Millercf124db2017-05-08 12:52:56 -04001988 bool needs_free_netdev;
1989 void (*priv_destructor)(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991#ifdef CONFIG_NETPOLL
Cong Wang5fbee842013-01-22 21:29:39 +00001992 struct netpoll_info __rcu *npinfo;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993#endif
David S. Millereae792b2008-07-15 03:03:33 -07001994
Eric W. Biederman0c5c9fb2015-03-11 23:06:44 -05001995 possible_net_t nd_net;
Eric W. Biederman4a1c5372007-09-12 11:56:32 +02001996
David S. Miller49517042008-05-12 03:29:11 -07001997 /* mid-layer private */
Eric Dumazeta7855c72010-09-23 23:51:51 +00001998 union {
Karoly Kemeny536721b2014-07-30 20:27:36 +02001999 void *ml_priv;
2000 struct pcpu_lstats __percpu *lstats;
Li RongQing8f849852014-01-04 13:57:59 +08002001 struct pcpu_sw_netstats __percpu *tstats;
Karoly Kemeny536721b2014-07-30 20:27:36 +02002002 struct pcpu_dstats __percpu *dstats;
2003 struct pcpu_vstats __percpu *vstats;
Eric Dumazeta7855c72010-09-23 23:51:51 +00002004 };
Karoly Kemeny536721b2014-07-30 20:27:36 +02002005
Tobias Klauserfb585b42017-02-10 16:43:50 +01002006#if IS_ENABLED(CONFIG_GARP)
Eric Dumazet3cc77ec2010-10-24 21:32:36 +00002007 struct garp_port __rcu *garp_port;
Tobias Klauserfb585b42017-02-10 16:43:50 +01002008#endif
2009#if IS_ENABLED(CONFIG_MRP)
David Wardfebf0182013-02-08 17:17:06 +00002010 struct mrp_port __rcu *mrp_port;
Tobias Klauserfb585b42017-02-10 16:43:50 +01002011#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002013 struct device dev;
Eric W. Biederman0c509a62009-10-29 14:18:21 +00002014 const struct attribute_group *sysfs_groups[4];
Michael Daltona953be52014-01-16 22:23:28 -08002015 const struct attribute_group *sysfs_rx_queue_group;
Patrick McHardy38f7b872007-06-13 12:03:51 -07002016
Patrick McHardy38f7b872007-06-13 12:03:51 -07002017 const struct rtnl_link_ops *rtnl_link_ops;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002018
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07002019 /* for setting kernel sock attribute on TCP connection setup */
2020#define GSO_MAX_SIZE 65536
2021 unsigned int gso_max_size;
Ben Hutchings30b678d2012-07-30 15:57:00 +00002022#define GSO_MAX_SEGS 65535
2023 u16 gso_max_segs;
Eric Dumazet743b03a2016-04-09 11:29:58 -07002024
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08002025#ifdef CONFIG_DCB
Stephen Hemminger32953542009-10-05 06:01:03 +00002026 const struct dcbnl_rtnl_ops *dcbnl_ops;
Alexander Duyck2f90b862008-11-20 20:52:10 -08002027#endif
Alexander Duyckffcfe252018-07-09 12:19:38 -04002028 s16 num_tc;
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002029 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
2030 u8 prio_tc_map[TC_BITMASK + 1];
Alexander Duyck2f90b862008-11-20 20:52:10 -08002031
Ben Hutchingsd11ead72011-11-25 14:40:26 +00002032#if IS_ENABLED(CONFIG_FCOE)
Yi Zou4d288d52009-02-27 14:06:59 -08002033 unsigned int fcoe_ddp_xid;
2034#endif
Daniel Borkmann86f85152013-12-29 17:27:11 +01002035#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00002036 struct netprio_map __rcu *priomap;
2037#endif
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002038 struct phy_device *phydev;
Russell Kinge679c9c2018-03-28 15:44:16 -07002039 struct sfp_bus *sfp_bus;
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002040 struct lock_class_key *qdisc_tx_busylock;
Eric Dumazetf9eb8ae2016-06-06 09:37:15 -07002041 struct lock_class_key *qdisc_running_key;
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002042 bool proto_down;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043};
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07002044#define to_net_dev(d) container_of(d, struct net_device, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045
David S. Millerb5cdae32017-04-18 15:36:58 -04002046static inline bool netif_elide_gro(const struct net_device *dev)
2047{
2048 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2049 return true;
2050 return false;
2051}
2052
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053#define NETDEV_ALIGN 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054
David S. Millere8a04642008-07-17 00:34:19 -07002055static inline
John Fastabend4f57c082011-01-17 08:06:04 +00002056int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2057{
2058 return dev->prio_tc_map[prio & TC_BITMASK];
2059}
2060
2061static inline
2062int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2063{
2064 if (tc >= dev->num_tc)
2065 return -EINVAL;
2066
2067 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2068 return 0;
2069}
2070
Alexander Duyck8d059b02016-10-28 11:43:49 -04002071int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002072void netdev_reset_tc(struct net_device *dev);
2073int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2074int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
John Fastabend4f57c082011-01-17 08:06:04 +00002075
2076static inline
2077int netdev_get_num_tc(struct net_device *dev)
2078{
2079 return dev->num_tc;
2080}
2081
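
To make the mapping concrete, here is a minimal sketch (the function name, the 2-class split and the queue ranges are invented for illustration) of how a multiqueue driver might use the helpers above to carve its TX queues into traffic classes and steer skb priorities into them:

static int example_setup_tc(struct net_device *dev)
{
	int err, prio;

	err = netdev_set_num_tc(dev, 2);	/* two traffic classes */
	if (err)
		return err;

	/* TC 0 owns TX queues 0-3, TC 1 owns TX queues 4-7 */
	netdev_set_tc_queue(dev, 0, 4, 0);
	netdev_set_tc_queue(dev, 1, 4, 4);

	/* low priorities map to TC 0, high priorities to TC 1 */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 8 ? 0 : 1);

	return 0;
}

Queue selection then consults prio_tc_map and tc_to_txq so traffic stays within its class's queue range.
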
Alexander Duyckffcfe252018-07-09 12:19:38 -04002082void netdev_unbind_sb_channel(struct net_device *dev,
2083 struct net_device *sb_dev);
2084int netdev_bind_sb_channel_queue(struct net_device *dev,
2085 struct net_device *sb_dev,
2086 u8 tc, u16 count, u16 offset);
2087int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2088static inline int netdev_get_sb_channel(struct net_device *dev)
2089{
2090 return max_t(int, -dev->num_tc, 0);
2091}
2092
John Fastabend4f57c082011-01-17 08:06:04 +00002093static inline
David S. Millere8a04642008-07-17 00:34:19 -07002094struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2095 unsigned int index)
2096{
2097 return &dev->_tx[index];
2098}
2099
Daniel Borkmann10c51b56232014-08-27 11:11:27 +02002100static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2101 const struct sk_buff *skb)
2102{
2103 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2104}
2105
David S. Millere8a04642008-07-17 00:34:19 -07002106static inline void netdev_for_each_tx_queue(struct net_device *dev,
2107 void (*f)(struct net_device *,
2108 struct netdev_queue *,
2109 void *),
2110 void *arg)
2111{
2112 unsigned int i;
2113
2114 for (i = 0; i < dev->num_tx_queues; i++)
2115 f(dev, &dev->_tx[i], arg);
2116}
2117
Eric Dumazetd3fff6c2016-06-09 07:45:12 -07002118#define netdev_lockdep_set_classes(dev) \
2119{ \
2120 static struct lock_class_key qdisc_tx_busylock_key; \
2121 static struct lock_class_key qdisc_running_key; \
2122 static struct lock_class_key qdisc_xmit_lock_key; \
2123 static struct lock_class_key dev_addr_list_lock_key; \
2124 unsigned int i; \
2125 \
2126 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2127 (dev)->qdisc_running_key = &qdisc_running_key; \
2128 lockdep_set_class(&(dev)->addr_list_lock, \
2129 &dev_addr_list_lock_key); \
2130 for (i = 0; i < (dev)->num_tx_queues; i++) \
2131 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2132 &qdisc_xmit_lock_key); \
2133}
2134
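
This macro hands each driver type its own static lockdep keys, so legitimately nested transmit paths (e.g. a tunnel xmit on top of a physical device) are not flagged as false deadlocks. A minimal sketch of the typical caller, invoked once while setting the device up:

static void example_setup(struct net_device *dev)
{
	/* give this driver's devices their own lockdep classes */
	netdev_lockdep_set_classes(dev);
}
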
Joe Perchesf629d202013-09-26 14:48:15 -07002135struct netdev_queue *netdev_pick_tx(struct net_device *dev,
Jason Wangf663dd92014-01-10 16:18:26 +08002136 struct sk_buff *skb,
Alexander Duyckeadec8772018-07-09 12:19:48 -04002137 struct net_device *sb_dev);
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002138
Paolo Abeni871b6422016-02-26 10:45:37 +01002139/* returns the headroom that the master device needs to take into account
2140 * when forwarding to this dev
2141 */
2142static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2143{
2144 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2145}
2146
2147static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2148{
2149 if (dev->netdev_ops->ndo_set_rx_headroom)
2150 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2151}
2152
2153/* set the device rx headroom to the dev's default */
2154static inline void netdev_reset_rx_headroom(struct net_device *dev)
2155{
2156 netdev_set_rx_headroom(dev, -1);
2157}
2158
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002159/*
2160 * Net namespace inlines
2161 */
2162static inline
2163struct net *dev_net(const struct net_device *dev)
2164{
Eric Dumazetc2d9ba92010-06-01 06:51:19 +00002165 return read_pnet(&dev->nd_net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002166}
2167
2168static inline
Denis V. Lunevf5aa23f2008-03-26 00:48:17 -07002169void dev_net_set(struct net_device *dev, struct net *net)
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002170{
Eric W. Biederman0c5c9fb2015-03-11 23:06:44 -05002171 write_pnet(&dev->nd_net, net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002172}
2173
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002174/**
2175 * netdev_priv - access network device private data
2176 * @dev: network device
2177 *
2178 * Get network device private data
2179 */
Patrick McHardy6472ce62007-06-13 12:03:21 -07002180static inline void *netdev_priv(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181{
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00002182 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183}
2184
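
The private area lives directly behind the aligned struct net_device, so it comes for free with the device allocation (e.g. alloc_etherdev(sizeof(struct example_priv))). A minimal sketch, with the private struct and xmit handler invented for illustration:

struct example_priv {
	void __iomem *regs;	/* hypothetical MMIO registers */
	u32 tx_count;
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->tx_count++;
	/* ... program priv->regs and hand skb to the hardware ... */
	return NETDEV_TX_OK;
}
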
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185/* Set the sysfs physical device reference for the network logical device.
2186 * If set prior to registration, a symlink will be created during initialization.
2187 */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07002188#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189
Marcel Holtmann384912e2009-08-31 21:08:19 +00002190/* Set the sysfs device type for the network logical device to allow
Maxime Jayat3f794102013-10-12 01:29:46 +02002191 * fine-grained identification of different network device types. For
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002192 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
Marcel Holtmann384912e2009-08-31 21:08:19 +00002193 */
2194#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2195
Eric Dumazet82dc3c63c2013-03-05 15:57:22 +00002196/* Default NAPI poll() weight
2197 * Device drivers are strongly advised not to use a bigger value
2198 */
2199#define NAPI_POLL_WEIGHT 64
2200
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002201/**
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002202 * netif_napi_add - initialize a NAPI context
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002203 * @dev: network device
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002204 * @napi: NAPI context
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002205 * @poll: polling function
2206 * @weight: default weight
2207 *
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002208 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2209 * *any* of the other NAPI-related functions.
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002210 */
Herbert Xud565b0a2008-12-15 23:38:52 -08002211void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2212 int (*poll)(struct napi_struct *, int), int weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002213
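
As a rough sketch (the poll routine and priv layout are invented), a driver wires its RX processing into NAPI like this, registering at probe time before the device can raise interrupts:

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to @budget received packets ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);	/* done for now */
	return work_done;
}

/* in the probe path, assuming a napi_struct embedded in the private data */
netif_napi_add(dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
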
Alexander Duyckd8156532008-07-08 15:13:05 -07002214/**
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002215 * netif_tx_napi_add - initialize a NAPI context
Eric Dumazetd64b5e82015-11-18 06:31:00 -08002216 * @dev: network device
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002217 * @napi: NAPI context
Eric Dumazetd64b5e82015-11-18 06:31:00 -08002218 * @poll: polling function
2219 * @weight: default weight
2220 *
2221 * This variant of netif_napi_add() should be used from drivers using NAPI
2222 * to exclusively poll a TX queue.
2223 * This avoids adding it into napi_hash[] and thus polluting that hash table.
2224 */
2225static inline void netif_tx_napi_add(struct net_device *dev,
2226 struct napi_struct *napi,
2227 int (*poll)(struct napi_struct *, int),
2228 int weight)
2229{
2230 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2231 netif_napi_add(dev, napi, poll, weight);
2232}
2233
2234/**
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002235 * netif_napi_del - remove a NAPI context
2236 * @napi: NAPI context
Alexander Duyckd8156532008-07-08 15:13:05 -07002237 *
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002238 * netif_napi_del() removes a NAPI context from the network device NAPI list
Alexander Duyckd8156532008-07-08 15:13:05 -07002239 */
Herbert Xud565b0a2008-12-15 23:38:52 -08002240void netif_napi_del(struct napi_struct *napi);
2241
2242struct napi_gro_cb {
Herbert Xu78a478d2009-05-26 18:50:21 +00002243 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002244 void *frag0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002245
Herbert Xu74895942009-05-26 18:50:27 +00002246 /* Length of frag0. */
2247 unsigned int frag0_len;
2248
Herbert Xu86911732009-01-29 14:19:50 +00002249 /* This indicates where we are processing relative to skb->data. */
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002250 int data_offset;
Herbert Xu86911732009-01-29 14:19:50 +00002251
Herbert Xud565b0a2008-12-15 23:38:52 -08002252 /* This is non-zero if the packet cannot be merged with the new skb. */
Jerry Chubf5a7552014-01-07 10:23:19 -08002253 u16 flush;
2254
2255 /* Save the IP ID here and check when we get to the transport layer */
2256 u16 flush_id;
Herbert Xud565b0a2008-12-15 23:38:52 -08002257
2258 /* Number of segments aggregated. */
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00002259 u16 count;
2260
Tom Herbert15e23962015-02-10 16:30:31 -08002261 /* Start offset for remote checksum offload */
2262 u16 gro_remcsum_start;
2263
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00002264 /* jiffies when first packet was created/queued */
2265 unsigned long age;
Eric Dumazet86347242012-10-08 21:38:50 +02002266
Tom Herbertafe93322014-09-17 12:25:57 -07002267 /* Used in ipv6_gro_receive() and foo-over-udp */
Or Gerlitzb582ef02014-01-20 13:59:19 +02002268 u16 proto;
2269
Tom Herbertbaa32ff2015-02-10 16:30:30 -08002270 /* This is non-zero if the packet may be of the same flow. */
2271 u8 same_flow:1;
2272
Jesse Grossfac8e0f2016-03-19 09:32:01 -07002273 /* Used in tunnel GRO receive */
2274 u8 encap_mark:1;
Tom Herbert573e8fc2014-08-22 13:33:47 -07002275
2276 /* GRO checksum is valid */
2277 u8 csum_valid:1;
2278
Tom Herbert662880f2014-08-27 21:26:56 -07002279 /* Number of checksums via CHECKSUM_UNNECESSARY */
2280 u8 csum_cnt:3;
Eric Dumazetc3c7c252012-12-06 13:54:59 +00002281
Tom Herbertbaa32ff2015-02-10 16:30:30 -08002282 /* Free the skb? */
2283 u8 free:2;
2284#define NAPI_GRO_FREE 1
2285#define NAPI_GRO_FREE_STOLEN_HEAD 2
2286
Tom Herbertefc98d02014-10-03 15:48:08 -07002287 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2288 u8 is_ipv6:1;
2289
Alexander Duycka0ca1532016-04-05 09:13:39 -07002290 /* Used in GRE, set in fou/gue_gro_receive */
2291 u8 is_fou:1;
2292
Alexander Duyck15305452016-04-10 21:44:57 -04002293 /* Used to determine if flush_id can be ignored */
2294 u8 is_atomic:1;
2295
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02002296 /* Number of gro_receive callbacks this packet already went through */
2297 u8 recursion_counter:4;
2298
2299 /* 1 bit hole */
Tom Herbertbaa32ff2015-02-10 16:30:30 -08002300
Jerry Chubf5a7552014-01-07 10:23:19 -08002301 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2302 __wsum csum;
2303
Eric Dumazetc3c7c252012-12-06 13:54:59 +00002304 /* used in skb_gro_receive() slow path */
2305 struct sk_buff *last;
Herbert Xud565b0a2008-12-15 23:38:52 -08002306};
2307
2308#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
Alexander Duyckd8156532008-07-08 15:13:05 -07002309
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02002310#define GRO_RECURSION_LIMIT 15
2311static inline int gro_recursion_inc_test(struct sk_buff *skb)
2312{
2313 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2314}
2315
David Millerd4546c22018-06-24 14:13:49 +09002316typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
2317static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
2318 struct list_head *head,
2319 struct sk_buff *skb)
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02002320{
2321 if (unlikely(gro_recursion_inc_test(skb))) {
2322 NAPI_GRO_CB(skb)->flush |= 1;
2323 return NULL;
2324 }
2325
2326 return cb(head, skb);
2327}
2328
David Millerd4546c22018-06-24 14:13:49 +09002329typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
2330 struct sk_buff *);
2331static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
2332 struct sock *sk,
2333 struct list_head *head,
2334 struct sk_buff *skb)
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02002335{
2336 if (unlikely(gro_recursion_inc_test(skb))) {
2337 NAPI_GRO_CB(skb)->flush |= 1;
2338 return NULL;
2339 }
2340
2341 return cb(sk, head, skb);
2342}
2343
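
A tunnel's own gro_receive callback dispatches to the inner protocol through call_gro_receive() so that a deeply nested packet is flushed rather than recursing without bound. A skeletal sketch (error handling and flush bookkeeping trimmed; the inner protocol is assumed to be IPv4):

static struct sk_buff *example_tunnel_gro_receive(struct list_head *head,
						  struct sk_buff *skb)
{
	struct packet_offload *ptype;
	struct sk_buff *pp = NULL;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(htons(ETH_P_IP));	/* inner proto */
	if (ptype)
		pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	rcu_read_unlock();
	return pp;
}
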
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344struct packet_type {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002345 __be16 type; /* This is really htons(ether_type). */
Vincent Whitchurchfa788d92018-09-03 16:23:36 +02002346 bool ignore_outgoing;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002347 struct net_device *dev; /* NULL is wildcarded here */
2348 int (*func) (struct sk_buff *,
2349 struct net_device *,
2350 struct packet_type *,
2351 struct net_device *);
Edward Cree17266ee2018-07-02 16:14:12 +01002352 void (*list_func) (struct list_head *,
2353 struct packet_type *,
2354 struct net_device *);
Eric Leblondc0de08d2012-08-16 22:02:58 +00002355 bool (*id_match)(struct packet_type *ptype,
2356 struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 void *af_packet_priv;
2358 struct list_head list;
2359};
2360
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002361struct offload_callbacks {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2363 netdev_features_t features);
David Millerd4546c22018-06-24 14:13:49 +09002364 struct sk_buff *(*gro_receive)(struct list_head *head,
2365 struct sk_buff *skb);
Jerry Chu299603e82013-12-11 20:53:45 -08002366 int (*gro_complete)(struct sk_buff *skb, int nhoff);
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002367};
2368
2369struct packet_offload {
2370 __be16 type; /* This is really htons(ether_type). */
David S. Millerbdef7de2015-06-01 14:56:09 -07002371 u16 priority;
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002372 struct offload_callbacks callbacks;
2373 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374};
2375
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002376/* often-modified stats are per-CPU, others are shared (netdev->stats) */
Li RongQing8f849852014-01-04 13:57:59 +08002377struct pcpu_sw_netstats {
2378 u64 rx_packets;
2379 u64 rx_bytes;
2380 u64 tx_packets;
2381 u64 tx_bytes;
2382 struct u64_stats_sync syncp;
2383};
2384
Li RongQing52bb6672018-09-14 16:00:51 +08002385struct pcpu_lstats {
2386 u64 packets;
2387 u64 bytes;
2388 struct u64_stats_sync syncp;
2389};
2390
Pablo Neira Ayusoaabc92b2015-11-10 14:31:18 +01002391#define __netdev_alloc_pcpu_stats(type, gfp) \
2392({ \
2393 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2394 if (pcpu_stats) { \
2395 int __cpu; \
2396 for_each_possible_cpu(__cpu) { \
2397 typeof(type) *stat; \
2398 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2399 u64_stats_init(&stat->syncp); \
2400 } \
2401 } \
2402 pcpu_stats; \
WANG Cong1c213bd2014-02-13 11:46:28 -08002403})
2404
Pablo Neira Ayusoaabc92b2015-11-10 14:31:18 +01002405#define netdev_alloc_pcpu_stats(type) \
Felix Fietkau326fcfa2015-12-05 13:58:11 +01002406 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
Pablo Neira Ayusoaabc92b2015-11-10 14:31:18 +01002407
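
A typical pattern (a sketch only; the function names are invented): allocate the per-CPU counters at init, bump them under the u64_stats seqcount on the hot path, and free_percpu() them on teardown:

static int example_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void example_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}
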
Jiri Pirko764f5e52015-12-03 12:12:12 +01002408enum netdev_lag_tx_type {
2409 NETDEV_LAG_TX_TYPE_UNKNOWN,
2410 NETDEV_LAG_TX_TYPE_RANDOM,
2411 NETDEV_LAG_TX_TYPE_BROADCAST,
2412 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2413 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2414 NETDEV_LAG_TX_TYPE_HASH,
2415};
2416
John Hurleyf44aa9e2018-05-23 19:22:52 -07002417enum netdev_lag_hash {
2418 NETDEV_LAG_HASH_NONE,
2419 NETDEV_LAG_HASH_L2,
2420 NETDEV_LAG_HASH_L34,
2421 NETDEV_LAG_HASH_L23,
2422 NETDEV_LAG_HASH_E23,
2423 NETDEV_LAG_HASH_E34,
2424 NETDEV_LAG_HASH_UNKNOWN,
2425};
2426
Jiri Pirko764f5e52015-12-03 12:12:12 +01002427struct netdev_lag_upper_info {
2428 enum netdev_lag_tx_type tx_type;
John Hurleyf44aa9e2018-05-23 19:22:52 -07002429 enum netdev_lag_hash hash_type;
Jiri Pirko764f5e52015-12-03 12:12:12 +01002430};
2431
Jiri Pirkofb1b2e32015-12-03 12:12:16 +01002432struct netdev_lag_lower_state_info {
2433 u8 link_up : 1,
2434 tx_enabled : 1;
2435};
2436
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437#include <linux/notifier.h>
2438
Kirill Tkhaiede27622018-03-23 19:47:19 +03002439/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
2440 * and the rtnetlink notification exclusion list in rtnetlink_event() when
2441 * adding new types.
Amerigo Wangdcfe1422011-07-25 17:13:09 -07002442 */
Kirill Tkhaiede27622018-03-23 19:47:19 +03002443enum netdev_cmd {
2444 NETDEV_UP = 1, /* For now you can't veto a device up/down */
2445 NETDEV_DOWN,
2446 NETDEV_REBOOT, /* Tell a protocol stack a network interface
Amerigo Wangdcfe1422011-07-25 17:13:09 -07002447 detected a hardware crash and restarted
2448 - we can use this, e.g., to kick TCP sessions
2449 once done */
Kirill Tkhaiede27622018-03-23 19:47:19 +03002450 NETDEV_CHANGE, /* Notify device state change */
2451 NETDEV_REGISTER,
2452 NETDEV_UNREGISTER,
2453 NETDEV_CHANGEMTU, /* notify after mtu change happened */
2454 NETDEV_CHANGEADDR,
2455 NETDEV_GOING_DOWN,
2456 NETDEV_CHANGENAME,
2457 NETDEV_FEAT_CHANGE,
2458 NETDEV_BONDING_FAILOVER,
2459 NETDEV_PRE_UP,
2460 NETDEV_PRE_TYPE_CHANGE,
2461 NETDEV_POST_TYPE_CHANGE,
2462 NETDEV_POST_INIT,
Kirill Tkhaiede27622018-03-23 19:47:19 +03002463 NETDEV_RELEASE,
2464 NETDEV_NOTIFY_PEERS,
2465 NETDEV_JOIN,
2466 NETDEV_CHANGEUPPER,
2467 NETDEV_RESEND_IGMP,
2468 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
2469 NETDEV_CHANGEINFODATA,
2470 NETDEV_BONDING_INFO,
2471 NETDEV_PRECHANGEUPPER,
2472 NETDEV_CHANGELOWERSTATE,
2473 NETDEV_UDP_TUNNEL_PUSH_INFO,
2474 NETDEV_UDP_TUNNEL_DROP_INFO,
2475 NETDEV_CHANGE_TX_QUEUE_LEN,
Gal Pressman9daae9b2018-03-28 17:46:54 +03002476 NETDEV_CVLAN_FILTER_PUSH_INFO,
2477 NETDEV_CVLAN_FILTER_DROP_INFO,
2478 NETDEV_SVLAN_FILTER_PUSH_INFO,
2479 NETDEV_SVLAN_FILTER_DROP_INFO,
Kirill Tkhaiede27622018-03-23 19:47:19 +03002480};
2481const char *netdev_cmd_to_name(enum netdev_cmd cmd);
Amerigo Wangdcfe1422011-07-25 17:13:09 -07002482
Joe Perchesf629d202013-09-26 14:48:15 -07002483int register_netdevice_notifier(struct notifier_block *nb);
2484int unregister_netdevice_notifier(struct notifier_block *nb);
Jiri Pirko351638e2013-05-28 01:30:21 +00002485
2486struct netdev_notifier_info {
David Ahern51d0c0472017-10-04 17:48:45 -07002487 struct net_device *dev;
2488 struct netlink_ext_ack *extack;
Jiri Pirko351638e2013-05-28 01:30:21 +00002489};
2490
Jiri Pirkobe9efd32013-05-28 01:30:22 +00002491struct netdev_notifier_change_info {
2492 struct netdev_notifier_info info; /* must be first */
2493 unsigned int flags_changed;
2494};
2495
Jiri Pirko0e4ead92015-08-27 09:31:18 +02002496struct netdev_notifier_changeupper_info {
2497 struct netdev_notifier_info info; /* must be first */
2498 struct net_device *upper_dev; /* new upper dev */
2499 bool master; /* is upper dev master */
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002500 bool linking; /* is the notification for link or unlink */
Jiri Pirko29bf24a2015-12-03 12:12:11 +01002501 void *upper_info; /* upper dev info */
Jiri Pirko0e4ead92015-08-27 09:31:18 +02002502};
2503
Jiri Pirko04d48262015-12-03 12:12:15 +01002504struct netdev_notifier_changelowerstate_info {
2505 struct netdev_notifier_info info; /* must be first */
2506 void *lower_state_info; /* lower dev state */
2507};
2508
Cong Wang75538c22013-05-29 11:30:50 +08002509static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2510 struct net_device *dev)
2511{
2512 info->dev = dev;
David Ahern51d0c0472017-10-04 17:48:45 -07002513 info->extack = NULL;
Cong Wang75538c22013-05-29 11:30:50 +08002514}
2515
Jiri Pirko351638e2013-05-28 01:30:21 +00002516static inline struct net_device *
2517netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2518{
2519 return info->dev;
2520}
2521
David Ahern51d0c0472017-10-04 17:48:45 -07002522static inline struct netlink_ext_ack *
2523netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
2524{
2525 return info->extack;
2526}
2527
Joe Perchesf629d202013-09-26 14:48:15 -07002528int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
Amerigo Wangdcfe1422011-07-25 17:13:09 -07002529
2530
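Putting the notifier API together, a subsystem interested in device state subscribes roughly like this (a sketch; the callback body is illustrative). Note that register_netdevice_notifier() replays NETDEV_REGISTER and NETDEV_UP for devices that already exist:

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		/* last chance to quiesce any users of @dev */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_nb) at init,
 * unregister_netdevice_notifier(&example_nb) at exit.
 */
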
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531extern rwlock_t dev_base_lock; /* Device list lock */
2532
Eric W. Biederman881d9662007-09-17 11:56:21 -07002533#define for_each_netdev(net, d) \
2534 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
Eric W. Biedermandcbccbd42009-11-29 22:25:26 +00002535#define for_each_netdev_reverse(net, d) \
2536 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08002537#define for_each_netdev_rcu(net, d) \
2538 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002539#define for_each_netdev_safe(net, d, n) \
2540 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2541#define for_each_netdev_continue(net, d) \
2542 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
stephen hemminger254245d2009-11-10 07:54:47 +00002543#define for_each_netdev_continue_rcu(net, d) \
2544 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
nikolay@redhat.com8a7fbfa2013-03-12 02:49:01 +00002545#define for_each_netdev_in_bond_rcu(bond, slave) \
2546 for_each_netdev_rcu(&init_net, slave) \
Benjamin Poirier4ccce022015-01-14 16:52:35 +09002547 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
Pavel Emelianov7562f872007-05-03 15:13:45 -07002548#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
2549
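
The _rcu variants may be used from read-side critical sections; the plain variants require the RTNL or dev_base_lock. For instance (a minimal sketch):

static void example_dump_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();
}
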
Daniel Lezcanoa050c332007-09-12 14:57:09 +02002550static inline struct net_device *next_net_device(struct net_device *dev)
2551{
2552 struct list_head *lh;
2553 struct net *net;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002554
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002555 net = dev_net(dev);
Daniel Lezcanoa050c332007-09-12 14:57:09 +02002556 lh = dev->dev_list.next;
2557 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2558}
2559
Eric Dumazetce81b762009-11-11 17:34:30 +00002560static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2561{
2562 struct list_head *lh;
2563 struct net *net;
2564
2565 net = dev_net(dev);
Eric Dumazetccf43432011-01-26 18:08:02 +00002566 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
Eric Dumazetce81b762009-11-11 17:34:30 +00002567 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2568}
2569
Daniel Lezcanoa050c332007-09-12 14:57:09 +02002570static inline struct net_device *first_net_device(struct net *net)
2571{
2572 return list_empty(&net->dev_base_head) ? NULL :
2573 net_device_entry(net->dev_base_head.next);
2574}
Pavel Emelianov7562f872007-05-03 15:13:45 -07002575
Eric Dumazetccf43432011-01-26 18:08:02 +00002576static inline struct net_device *first_net_device_rcu(struct net *net)
2577{
2578 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2579
2580 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2581}
2582
Joe Perchesf629d202013-09-26 14:48:15 -07002583int netdev_boot_setup_check(struct net_device *dev);
2584unsigned long netdev_boot_base(const char *prefix, int unit);
2585struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2586 const char *hwaddr);
2587struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2588struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2589void dev_add_pack(struct packet_type *pt);
2590void dev_remove_pack(struct packet_type *pt);
2591void __dev_remove_pack(struct packet_type *pt);
2592void dev_add_offload(struct packet_offload *po);
2593void dev_remove_offload(struct packet_offload *po);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594
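
Tying struct packet_type to these registration calls, a protocol handler for a dedicated ethertype might look like the following sketch (the handler body and the choice of the IEEE local-experimental ethertype 0x88b5 are illustrative):

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* ... parse and consume the frame ... */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_ptype __read_mostly = {
	.type = cpu_to_be16(0x88b5),	/* local experimental ethertype */
	.func = example_rcv,
};

/* dev_add_pack(&example_ptype) at init, dev_remove_pack() at exit */
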
Nicolas Dichtela54acb32015-04-02 17:07:00 +02002595int dev_get_iflink(const struct net_device *dev);
Pravin B Shelarfc4099f2015-10-22 18:17:16 -07002596int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
WANG Cong6c555492014-09-11 15:35:09 -07002597struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2598 unsigned short mask);
Joe Perchesf629d202013-09-26 14:48:15 -07002599struct net_device *dev_get_by_name(struct net *net, const char *name);
2600struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2601struct net_device *__dev_get_by_name(struct net *net, const char *name);
2602int dev_alloc_name(struct net_device *dev, const char *name);
2603int dev_open(struct net_device *dev);
stephen hemminger7051b882017-07-18 15:59:27 -07002604void dev_close(struct net_device *dev);
2605void dev_close_many(struct list_head *head, bool unlink);
Joe Perchesf629d202013-09-26 14:48:15 -07002606void dev_disable_lro(struct net_device *dev);
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05002607int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
Alexander Duycka4ea8a32018-07-09 12:19:54 -04002608u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
Alexander Duyck4f49dec2018-07-09 12:19:59 -04002609 struct net_device *sb_dev,
2610 select_queue_fallback_t fallback);
Alexander Duycka4ea8a32018-07-09 12:19:54 -04002611u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
Alexander Duyck4f49dec2018-07-09 12:19:59 -04002612 struct net_device *sb_dev,
2613 select_queue_fallback_t fallback);
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05002614int dev_queue_xmit(struct sk_buff *skb);
Alexander Duyckeadec8772018-07-09 12:19:48 -04002615int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
Magnus Karlsson865b03f2018-05-02 13:01:33 +02002616int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
Joe Perchesf629d202013-09-26 14:48:15 -07002617int register_netdevice(struct net_device *dev);
2618void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2619void unregister_netdevice_many(struct list_head *head);
Eric Dumazet44a08732009-10-27 07:03:04 +00002620static inline void unregister_netdevice(struct net_device *dev)
2621{
2622 unregister_netdevice_queue(dev, NULL);
2623}
2624
Joe Perchesf629d202013-09-26 14:48:15 -07002625int netdev_refcnt_read(const struct net_device *dev);
2626void free_netdev(struct net_device *dev);
Eric Dumazet74d332c2013-10-30 13:10:44 -07002627void netdev_freemem(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07002628void synchronize_net(void);
2629int init_dummy_netdev(struct net_device *dev);
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08002630
hannes@stressinduktion.orgf60e5992015-04-01 17:07:44 +02002631DECLARE_PER_CPU(int, xmit_recursion);
Daniel Borkmanna70b5062016-06-10 21:19:06 +02002632#define XMIT_RECURSION_LIMIT 10
2633
hannes@stressinduktion.orgf60e5992015-04-01 17:07:44 +02002634static inline int dev_recursion_level(void)
2635{
2636 return this_cpu_read(xmit_recursion);
2637}
2638
Joe Perchesf629d202013-09-26 14:48:15 -07002639struct net_device *dev_get_by_index(struct net *net, int ifindex);
2640struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2641struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
Miroslav Lichvar90b602f2017-05-19 17:52:37 +02002642struct net_device *dev_get_by_napi_id(unsigned int napi_id);
Joe Perchesf629d202013-09-26 14:48:15 -07002643int netdev_get_name(struct net *net, char *name, int ifindex);
2644int dev_restart(struct net_device *dev);
David Millerd4546c22018-06-24 14:13:49 +09002645int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
Herbert Xu86911732009-01-29 14:19:50 +00002646
2647static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2648{
2649 return NAPI_GRO_CB(skb)->data_offset;
2650}
2651
2652static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2653{
2654 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2655}
2656
2657static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2658{
2659 NAPI_GRO_CB(skb)->data_offset += len;
2660}
2661
Herbert Xua5b1cf22009-05-26 18:50:28 +00002662static inline void *skb_gro_header_fast(struct sk_buff *skb,
2663 unsigned int offset)
Herbert Xu86911732009-01-29 14:19:50 +00002664{
Herbert Xu78a478d2009-05-26 18:50:21 +00002665 return NAPI_GRO_CB(skb)->frag0 + offset;
Herbert Xu86911732009-01-29 14:19:50 +00002666}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667
Herbert Xua5b1cf22009-05-26 18:50:28 +00002668static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2669{
2670 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2671}
2672
Herbert Xu57ea52a2017-01-10 12:24:15 -08002673static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
2674{
2675 NAPI_GRO_CB(skb)->frag0 = NULL;
2676 NAPI_GRO_CB(skb)->frag0_len = 0;
2677}
2678
Herbert Xua5b1cf22009-05-26 18:50:28 +00002679static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2680 unsigned int offset)
2681{
Herbert Xu17dd7592011-07-27 06:16:28 -07002682 if (!pskb_may_pull(skb, hlen))
2683 return NULL;
2684
Herbert Xu57ea52a2017-01-10 12:24:15 -08002685 skb_gro_frag0_invalidate(skb);
Herbert Xu17dd7592011-07-27 06:16:28 -07002686 return skb->data + offset;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002687}
2688
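
The canonical pattern in a gro_receive callback is to try the fast frag0 path first and fall back to the slow, pulling path only when the header is not fully within frag0. A sketch, with struct example_hdr standing in for a real protocol header:

struct example_hdr {
	__be16 flags;
	__be16 len;
};

static struct example_hdr *example_gro_header(struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct example_hdr);
	struct example_hdr *hdr;

	hdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		hdr = skb_gro_header_slow(skb, hlen, off);	/* may be NULL */
	return hdr;
}
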
Herbert Xu36e7b1b2009-04-27 05:44:45 -07002689static inline void *skb_gro_network_header(struct sk_buff *skb)
2690{
Herbert Xu78d3fd02009-05-26 18:50:23 +00002691 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2692 skb_network_offset(skb);
Herbert Xu36e7b1b2009-04-27 05:44:45 -07002693}
2694
Jerry Chubf5a7552014-01-07 10:23:19 -08002695static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2696 const void *start, unsigned int len)
2697{
Tom Herbert573e8fc2014-08-22 13:33:47 -07002698 if (NAPI_GRO_CB(skb)->csum_valid)
Jerry Chubf5a7552014-01-07 10:23:19 -08002699 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2700 csum_partial(start, len, 0));
2701}
2702
Tom Herbert573e8fc2014-08-22 13:33:47 -07002703/* GRO checksum functions. These are logical equivalents of the normal
2704 * checksum functions (in skbuff.h) except that they operate on the GRO
2705 * offsets and fields in sk_buff.
2706 */
2707
2708__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2709
Tom Herbert15e23962015-02-10 16:30:31 -08002710static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2711{
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002712 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
Tom Herbert15e23962015-02-10 16:30:31 -08002713}
2714
Tom Herbert573e8fc2014-08-22 13:33:47 -07002715static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2716 bool zero_okay,
2717 __sum16 check)
2718{
Tom Herbert6edec0e2015-02-10 16:30:28 -08002719 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2720 skb_checksum_start_offset(skb) <
2721 skb_gro_offset(skb)) &&
Tom Herbert15e23962015-02-10 16:30:31 -08002722 !skb_at_gro_remcsum_start(skb) &&
Tom Herbert662880f2014-08-27 21:26:56 -07002723 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
Tom Herbert573e8fc2014-08-22 13:33:47 -07002724 (!zero_okay || check));
2725}
2726
2727static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2728 __wsum psum)
2729{
2730 if (NAPI_GRO_CB(skb)->csum_valid &&
2731 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2732 return 0;
2733
2734 NAPI_GRO_CB(skb)->csum = psum;
2735
2736 return __skb_gro_checksum_complete(skb);
2737}
2738
Tom Herbert573e8fc2014-08-22 13:33:47 -07002739static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2740{
Tom Herbert662880f2014-08-27 21:26:56 -07002741 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2742 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2743 NAPI_GRO_CB(skb)->csum_cnt--;
2744 } else {
2745 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2746 * verified a new top level checksum or an encapsulated one
2747 * during GRO. This saves work if we fallback to normal path.
2748 */
2749 __skb_incr_checksum_unnecessary(skb);
Tom Herbert573e8fc2014-08-22 13:33:47 -07002750 }
2751}
2752
2753#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2754 compute_pseudo) \
2755({ \
2756 __sum16 __ret = 0; \
2757 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2758 __ret = __skb_gro_checksum_validate_complete(skb, \
2759 compute_pseudo(skb, proto)); \
Davide Caratti219f1d792017-05-18 15:44:39 +02002760 if (!__ret) \
Tom Herbert573e8fc2014-08-22 13:33:47 -07002761 skb_gro_incr_csum_unnecessary(skb); \
2762 __ret; \
2763})
2764
2765#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2766 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2767
2768#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2769 compute_pseudo) \
2770 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2771
2772#define skb_gro_checksum_simple_validate(skb) \
2773 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2774
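
A protocol's gro_receive typically validates the checksum up front and refuses to aggregate anything it cannot verify; a minimal sketch using the simple variant:

static struct sk_buff *example_gro_receive(struct list_head *head,
					   struct sk_buff *skb)
{
	/* non-zero return: checksum could not be validated, don't merge */
	if (skb_gro_checksum_simple_validate(skb)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	/* ... safe to look up a matching flow on @head and aggregate ... */
	return NULL;
}
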
Tom Herbertd96535a2014-08-31 15:12:42 -07002775static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2776{
2777 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2778 !NAPI_GRO_CB(skb)->csum_valid);
2779}
2780
2781static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2782 __sum16 check, __wsum pseudo)
2783{
2784 NAPI_GRO_CB(skb)->csum = ~pseudo;
2785 NAPI_GRO_CB(skb)->csum_valid = 1;
2786}
2787
2788#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2789do { \
2790 if (__skb_gro_checksum_convert_check(skb)) \
2791 __skb_gro_checksum_convert(skb, check, \
2792 compute_pseudo(skb, proto)); \
2793} while (0)
2794
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002795struct gro_remcsum {
2796 int offset;
2797 __wsum delta;
2798};
2799
2800static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2801{
Geert Uytterhoeven846cd662015-02-18 11:38:06 +01002802 grc->offset = 0;
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002803 grc->delta = 0;
2804}
2805
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002806static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2807 unsigned int off, size_t hdrlen,
2808 int start, int offset,
2809 struct gro_remcsum *grc,
2810 bool nopartial)
Tom Herbertdcdc8992015-02-02 16:07:34 -08002811{
2812 __wsum delta;
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002813 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
Tom Herbertdcdc8992015-02-02 16:07:34 -08002814
2815 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2816
Tom Herbert15e23962015-02-10 16:30:31 -08002817 if (!nopartial) {
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002818 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2819 return ptr;
Tom Herbert15e23962015-02-10 16:30:31 -08002820 }
2821
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002822 ptr = skb_gro_header_fast(skb, off);
2823 if (skb_gro_header_hard(skb, off + plen)) {
2824 ptr = skb_gro_header_slow(skb, off + plen, off);
2825 if (!ptr)
2826 return NULL;
2827 }
2828
2829 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2830 start, offset);
Tom Herbertdcdc8992015-02-02 16:07:34 -08002831
2832 /* Adjust skb->csum since we changed the packet */
Tom Herbertdcdc8992015-02-02 16:07:34 -08002833 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002834
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002835 grc->offset = off + hdrlen + offset;
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002836 grc->delta = delta;
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002837
2838 return ptr;
Tom Herbertdcdc8992015-02-02 16:07:34 -08002839}
2840
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002841static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2842 struct gro_remcsum *grc)
2843{
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002844 void *ptr;
2845 size_t plen = grc->offset + sizeof(u16);
2846
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002847 if (!grc->delta)
2848 return;
2849
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002850 ptr = skb_gro_header_fast(skb, grc->offset);
2851 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2852 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2853 if (!ptr)
2854 return;
2855 }
2856
2857 remcsum_unadjust((__sum16 *)ptr, grc->delta);
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002858}
Tom Herbertdcdc8992015-02-02 16:07:34 -08002859
Steffen Klassert25393d32017-02-15 09:39:44 +01002860#ifdef CONFIG_XFRM_OFFLOAD
David Millerd4546c22018-06-24 14:13:49 +09002861static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
Steffen Klassert25393d32017-02-15 09:39:44 +01002862{
2863 if (PTR_ERR(pp) != -EINPROGRESS)
2864 NAPI_GRO_CB(skb)->flush |= flush;
2865}
Sabrina Dubroca603d4cf2018-06-30 17:38:55 +02002866static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
David S. Miller5cd3da42018-07-03 10:26:50 +09002867 struct sk_buff *pp,
Sabrina Dubroca603d4cf2018-06-30 17:38:55 +02002868 int flush,
2869 struct gro_remcsum *grc)
2870{
2871 if (PTR_ERR(pp) != -EINPROGRESS) {
2872 NAPI_GRO_CB(skb)->flush |= flush;
2873 skb_gro_remcsum_cleanup(skb, grc);
2874 skb->remcsum_offload = 0;
2875 }
2876}
Steffen Klassert25393d32017-02-15 09:39:44 +01002877#else
David Millerd4546c22018-06-24 14:13:49 +09002878static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
Steffen Klassert5f114162017-02-15 09:39:39 +01002879{
2880 NAPI_GRO_CB(skb)->flush |= flush;
2881}
Sabrina Dubroca603d4cf2018-06-30 17:38:55 +02002882static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
David S. Miller5cd3da42018-07-03 10:26:50 +09002883 struct sk_buff *pp,
Sabrina Dubroca603d4cf2018-06-30 17:38:55 +02002884 int flush,
2885 struct gro_remcsum *grc)
2886{
2887 NAPI_GRO_CB(skb)->flush |= flush;
2888 skb_gro_remcsum_cleanup(skb, grc);
2889 skb->remcsum_offload = 0;
2890}
Steffen Klassert25393d32017-02-15 09:39:44 +01002891#endif
Steffen Klassert5f114162017-02-15 09:39:39 +01002892
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002893static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2894 unsigned short type,
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07002895 const void *daddr, const void *saddr,
Eric Dumazet95c96172012-04-15 05:58:06 +00002896 unsigned int len)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002897{
Ursula Braunf1ecfd52007-10-22 16:16:14 +02002898 if (!dev->header_ops || !dev->header_ops->create)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002899 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07002900
2901 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002902}
2903
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07002904static inline int dev_parse_header(const struct sk_buff *skb,
2905 unsigned char *haddr)
2906{
2907 const struct net_device *dev = skb->dev;
2908
Patrick McHardy1b833362007-10-18 05:09:28 -07002909 if (!dev->header_ops || !dev->header_ops->parse)
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07002910 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07002911 return dev->header_ops->parse(skb, haddr);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07002912}
2913
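
To emit a raw frame from kernel code, a caller reserves link-layer headroom, fills the payload, and lets dev_hard_header() build the L2 header (passing a NULL source address to use dev->dev_addr). A sketch, reusing the illustrative experimental ethertype from above:

static int example_send(struct net_device *dev, const void *dst,
			const void *payload, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_put_data(skb, payload, len);
	skb->dev = dev;
	skb->protocol = htons(0x88b5);	/* local experimental ethertype */

	if (dev_hard_header(skb, dev, 0x88b5, dst, NULL, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}
	return dev_queue_xmit(skb);
}
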
Willem de Bruijn2793a232016-03-09 21:58:32 -05002914/* ll_header must have at least hard_header_len allocated */
2915static inline bool dev_validate_header(const struct net_device *dev,
2916 char *ll_header, int len)
2917{
2918 if (likely(len >= dev->hard_header_len))
2919 return true;
Willem de Bruijn217e6fa2017-02-07 15:57:20 -05002920 if (len < dev->min_header_len)
2921 return false;
Willem de Bruijn2793a232016-03-09 21:58:32 -05002922
2923 if (capable(CAP_SYS_RAWIO)) {
2924 memset(ll_header + len, 0, dev->hard_header_len - len);
2925 return true;
2926 }
2927
2928 if (dev->header_ops && dev->header_ops->validate)
2929 return dev->header_ops->validate(ll_header, len);
2930
2931 return false;
2932}
2933
Al Viro36fd6332017-06-26 13:19:16 -04002934typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
2935 int len, int size);
Joe Perchesf629d202013-09-26 14:48:15 -07002936int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937static inline int unregister_gifconf(unsigned int family)
2938{
2939 return register_gifconf(family, NULL);
2940}
2941
Willem de Bruijn99bbc702013-05-20 04:02:32 +00002942#ifdef CONFIG_NET_FLOW_LIMIT
Willem de Bruijn5f121b92013-06-13 15:29:38 -04002943#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
Willem de Bruijn99bbc702013-05-20 04:02:32 +00002944struct sd_flow_limit {
2945 u64 count;
2946 unsigned int num_buckets;
2947 unsigned int history_head;
2948 u16 history[FLOW_LIMIT_HISTORY];
2949 u8 buckets[];
2950};
2951
2952extern int netdev_flow_limit_table_len;
2953#endif /* CONFIG_NET_FLOW_LIMIT */
2954
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955/*
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002956 * Incoming packets are placed on per-CPU queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 */
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08002958struct softnet_data {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 struct list_head poll_list;
Changli Gao6e7676c2010-04-27 15:07:33 -07002960 struct sk_buff_head process_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961
Changli Gaodee42872010-05-02 05:42:16 +00002962 /* stats */
David S. Millercd7b5392010-05-02 22:27:59 -07002963 unsigned int processed;
2964 unsigned int time_squeeze;
David S. Millercd7b5392010-05-02 22:27:59 -07002965 unsigned int received_rps;
Changli Gaofd793d82010-04-15 00:16:59 -07002966#ifdef CONFIG_RPS
Eric Dumazet88751272010-04-19 05:07:33 +00002967 struct softnet_data *rps_ipi_list;
Eric Dumazet4cdb1e22014-11-02 06:00:12 -08002968#endif
2969#ifdef CONFIG_NET_FLOW_LIMIT
2970 struct sd_flow_limit __rcu *flow_limit;
2971#endif
2972 struct Qdisc *output_queue;
2973 struct Qdisc **output_queue_tailp;
2974 struct sk_buff *completion_queue;
Steffen Klassertf53c7232017-12-20 10:41:36 +01002975#ifdef CONFIG_XFRM_OFFLOAD
2976 struct sk_buff_head xfrm_backlog;
2977#endif
Eric Dumazet4cdb1e22014-11-02 06:00:12 -08002978#ifdef CONFIG_RPS
Eric Dumazet501e7ef2016-04-26 15:30:07 -07002979 /* input_queue_head should be written by cpu owning this struct,
2980 * and only read by other cpus. Worth using a cache line.
2981 */
2982 unsigned int input_queue_head ____cacheline_aligned_in_smp;
2983
2984 /* Elements below can be accessed between CPUs for RPS/RFS */
Ying Huang966a9672017-08-08 12:30:00 +08002985 call_single_data_t csd ____cacheline_aligned_in_smp;
Eric Dumazet88751272010-04-19 05:07:33 +00002986 struct softnet_data *rps_ipi_next;
2987 unsigned int cpu;
Tom Herbert76cc8b12010-05-20 18:37:59 +00002988 unsigned int input_queue_tail;
Tom Herbert1e94d722010-03-18 17:45:44 -07002989#endif
Eric Dumazet95c96172012-04-15 05:58:06 +00002990 unsigned int dropped;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002991 struct sk_buff_head input_pkt_queue;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002992 struct napi_struct backlog;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00002993
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994};
2995
Tom Herbert76cc8b12010-05-20 18:37:59 +00002996static inline void input_queue_head_incr(struct softnet_data *sd)
Tom Herbertfec5e652010-04-16 16:01:27 -07002997{
2998#ifdef CONFIG_RPS
Tom Herbert76cc8b12010-05-20 18:37:59 +00002999 sd->input_queue_head++;
3000#endif
3001}
3002
3003static inline void input_queue_tail_incr_save(struct softnet_data *sd,
3004 unsigned int *qtail)
3005{
3006#ifdef CONFIG_RPS
3007 *qtail = ++sd->input_queue_tail;
Tom Herbertfec5e652010-04-16 16:01:27 -07003008#endif
3009}
3010
Tom Herbert0a9627f2010-03-16 08:03:29 +00003011DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012
Joe Perchesf629d202013-09-26 14:48:15 -07003013void __netif_schedule(struct Qdisc *q);
John Fastabend46e5da40a2014-09-12 20:04:52 -07003014void netif_schedule_queue(struct netdev_queue *txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003016static inline void netif_tx_schedule_all(struct net_device *dev)
3017{
3018 unsigned int i;
3019
3020 for (i = 0; i < dev->num_tx_queues; i++)
3021 netif_schedule_queue(netdev_get_tx_queue(dev, i));
3022}
3023
Denys Vlasenkof9a7cbb2016-04-08 17:51:54 +02003024static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
Dave Jonesd29f7492008-07-22 14:09:06 -07003025{
Tom Herbert734664982011-11-28 16:32:44 +00003026 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07003027}
3028
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003029/**
3030 * netif_start_queue - allow transmit
3031 * @dev: network device
3032 *
3033 * Allow upper layers to call the device hard_start_xmit routine.
3034 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035static inline void netif_start_queue(struct net_device *dev)
3036{
David S. Millere8a04642008-07-17 00:34:19 -07003037 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038}
3039
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003040static inline void netif_tx_start_all_queues(struct net_device *dev)
3041{
3042 unsigned int i;
3043
3044 for (i = 0; i < dev->num_tx_queues; i++) {
3045 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3046 netif_tx_start_queue(txq);
3047 }
3048}
3049
John Fastabend46e5da40a2014-09-12 20:04:52 -07003050void netif_tx_wake_queue(struct netdev_queue *dev_queue);
David S. Miller79d16382008-07-08 23:14:46 -07003051
Dave Jonesd29f7492008-07-22 14:09:06 -07003052/**
3053 * netif_wake_queue - restart transmit
3054 * @dev: network device
3055 *
3056 * Allow upper layers to call the device hard_start_xmit routine.
3057 * Used for flow control when transmit resources are available.
3058 */
David S. Miller79d16382008-07-08 23:14:46 -07003059static inline void netif_wake_queue(struct net_device *dev)
3060{
David S. Millere8a04642008-07-17 00:34:19 -07003061 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062}
3063
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003064static inline void netif_tx_wake_all_queues(struct net_device *dev)
3065{
3066 unsigned int i;
3067
3068 for (i = 0; i < dev->num_tx_queues; i++) {
3069 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3070 netif_tx_wake_queue(txq);
3071 }
3072}
3073
Denys Vlasenkof9a7cbb2016-04-08 17:51:54 +02003074static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
Dave Jonesd29f7492008-07-22 14:09:06 -07003075{
Tom Herbert734664982011-11-28 16:32:44 +00003076 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07003077}
3078
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003079/**
3080 * netif_stop_queue - stop the transmit queue
3081 * @dev: network device
3082 *
3083 * Stop upper layers calling the device hard_start_xmit routine.
3084 * Used for flow control when transmit resources are unavailable.
3085 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086static inline void netif_stop_queue(struct net_device *dev)
3087{
David S. Millere8a04642008-07-17 00:34:19 -07003088 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089}
3090
Denys Vlasenkoa2029242015-05-11 21:17:53 +02003091void netif_tx_stop_all_queues(struct net_device *dev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003092
David S. Miller4d295152012-03-07 21:02:35 -05003093static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
Dave Jonesd29f7492008-07-22 14:09:06 -07003094{
Tom Herbert734664982011-11-28 16:32:44 +00003095 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07003096}
3097
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003098/**
3099 * netif_queue_stopped - test if transmit queue is flowblocked
3100 * @dev: network device
3101 *
3102 * Test if transmit queue on device is currently unable to send.
3103 */
David S. Miller4d295152012-03-07 21:02:35 -05003104static inline bool netif_queue_stopped(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105{
David S. Millere8a04642008-07-17 00:34:19 -07003106 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107}
3108
David S. Miller4d295152012-03-07 21:02:35 -05003109static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
David S. Millerc3f26a22008-07-31 16:58:50 -07003110{
Tom Herbert734664982011-11-28 16:32:44 +00003111 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3112}
3113
Daniel Borkmann8e2f1a62014-04-02 20:52:57 +02003114static inline bool
3115netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
Tom Herbert734664982011-11-28 16:32:44 +00003116{
3117 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3118}
3119
Daniel Borkmann8e2f1a62014-04-02 20:52:57 +02003120static inline bool
3121netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3122{
3123 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3124}
3125
Eric Dumazet53511452014-10-08 08:19:27 -07003126/**
3127 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3128 * @dev_queue: pointer to transmit queue
3129 *
3130 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05003131 * to give appropriate hint to the CPU.
Eric Dumazet53511452014-10-08 08:19:27 -07003132 */
3133static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3134{
3135#ifdef CONFIG_BQL
3136 prefetchw(&dev_queue->dql.num_queued);
3137#endif
3138}
3139
3140/**
3141 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3142 * @dev_queue: pointer to transmit queue
3143 *
3144 * BQL enabled drivers might use this helper in their TX completion path,
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05003145 * to give appropriate hint to the CPU.
Eric Dumazet53511452014-10-08 08:19:27 -07003146 */
3147static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3148{
3149#ifdef CONFIG_BQL
3150 prefetchw(&dev_queue->dql.limit);
3151#endif
3152}
3153
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003154static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3155 unsigned int bytes)
3156{
Tom Herbert114cf582011-11-28 16:33:09 +00003157#ifdef CONFIG_BQL
3158 dql_queued(&dev_queue->dql, bytes);
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00003159
3160 if (likely(dql_avail(&dev_queue->dql) >= 0))
3161 return;
3162
3163 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3164
3165 /*
3166 * The XOFF flag must be set before checking the dql_avail below,
3167 * because in netdev_tx_completed_queue we update the dql_completed
3168 * before checking the XOFF flag.
3169 */
3170 smp_mb();
3171
3172 /* check again in case another CPU has just made room avail */
3173 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3174 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
Tom Herbert114cf582011-11-28 16:33:09 +00003175#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003176}
3177
Florian Fainelli0042d0c2013-09-06 16:58:00 +01003178/**
3179 * netdev_sent_queue - report the number of bytes queued to hardware
3180 * @dev: network device
3181 * @bytes: number of bytes queued to the hardware device queue
3182 *
3183 * Report the number of bytes queued for sending/completion to the network
3184 * device hardware queue. @bytes should be a good approximation and must
3185 * exactly match the @bytes amount later passed to netdev_completed_queue().
3186 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003187static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3188{
3189 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3190}
3191
3192static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
Eric Dumazet95c96172012-04-15 05:58:06 +00003193 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003194{
Tom Herbert114cf582011-11-28 16:33:09 +00003195#ifdef CONFIG_BQL
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00003196 if (unlikely(!bytes))
3197 return;
3198
3199 dql_completed(&dev_queue->dql, bytes);
3200
3201 /*
3202 * Without the memory barrier there is a small possibility that
3203 * netdev_tx_sent_queue will miss the update and cause the queue to
3204 * be stopped forever
3205 */
3206 smp_mb();
3207
3208 if (dql_avail(&dev_queue->dql) < 0)
3209 return;
3210
3211 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3212 netif_schedule_queue(dev_queue);
Tom Herbert114cf582011-11-28 16:33:09 +00003213#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003214}
3215
Florian Fainelli0042d0c2013-09-06 16:58:00 +01003216/**
3217 * netdev_completed_queue - report bytes and packets completed by device
3218 * @dev: network device
3219 * @pkts: actual number of packets sent over the medium
3220 * @bytes: actual number of bytes sent over the medium
3221 *
3222 * Report the number of bytes and packets transmitted by the network device
3223 * hardware queue over the physical medium, @bytes must exactly match the
3224 * @bytes amount passed to netdev_sent_queue()
3225 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003226static inline void netdev_completed_queue(struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +00003227 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003228{
3229 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3230}
3231
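
In a driver, the two halves pair up across the transmit and completion paths; a minimal sketch for a single-queue device (ring handling elided):

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_sent_queue(dev, skb->len);
	/* ... post skb to the hardware ring ... */
	return NETDEV_TX_OK;
}

/* called from the TX-completion (IRQ/NAPI) path */
static void example_tx_clean(struct net_device *dev,
			     unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);
}

After a ring reset the driver must also clear the DQL state, as documented for netdev_reset_queue() below.
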
3232static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3233{
Tom Herbert114cf582011-11-28 16:33:09 +00003234#ifdef CONFIG_BQL
Alexander Duyck5c490352012-02-07 02:29:01 +00003235 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
Tom Herbert114cf582011-11-28 16:33:09 +00003236 dql_reset(&q->dql);
3237#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003238}
3239
Florian Fainelli0042d0c2013-09-06 16:58:00 +01003240/**
3241 * netdev_reset_queue - reset the packets and bytes count of a network device
3242 * @dev_queue: network device
3243 *
3244 * Reset the bytes and packet count of a network device and clear the
3245 * software flow control OFF bit for this network device
3246 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003247static inline void netdev_reset_queue(struct net_device *dev_queue)
3248{
3249 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
David S. Millerc3f26a22008-07-31 16:58:50 -07003250}
3251
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003252/**
Daniel Borkmannb9507bd2014-02-16 15:55:21 +01003253 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3254 * @dev: network device
3255 * @queue_index: given tx queue index
3256 *
3257 * Returns 0 if given tx queue index >= number of device tx queues,
3258 * otherwise returns the originally passed tx queue index.
3259 */
3260static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3261{
3262 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3263 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3264 dev->name, queue_index,
3265 dev->real_num_tx_queues);
3266 return 0;
3267 }
3268
3269 return queue_index;
3270}
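/*
 * Illustrative sketch, not part of this header: clamping a possibly
 * stale queue_mapping (e.g. one inherited from another device) before
 * using it as a TX queue index. foo_pick_txq() is a hypothetical helper.
 */
static u16 foo_pick_txq(struct net_device *dev, struct sk_buff *skb)
{
	return netdev_cap_txqueue(dev, skb_get_queue_mapping(skb));
}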
3271
3272/**
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003273 * netif_running - test if up
3274 * @dev: network device
3275 *
3276 * Test if the device has been brought up.
3277 */
David S. Miller4d295152012-03-07 21:02:35 -05003278static inline bool netif_running(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003279{
3280 return test_bit(__LINK_STATE_START, &dev->state);
3281}
3282
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003283/*
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05003284 * Routines to manage the subqueues on a device. We only need start,
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003285 * stop, and a check if it's stopped. All other device management is
3286 * done at the overall netdevice level.
3287 * There is also a test for whether the device is multiqueue.
3288 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003289
3290/**
3291 * netif_start_subqueue - allow sending packets on subqueue
3292 * @dev: network device
3293 * @queue_index: sub queue index
3294 *
3295 * Start individual transmit queue of a device with multiple transmit queues.
3296 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003297static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3298{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003299 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00003300
3301 netif_tx_start_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003302}
3303
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003304/**
3305 * netif_stop_subqueue - stop sending packets on subqueue
3306 * @dev: network device
3307 * @queue_index: sub queue index
3308 *
3309 * Stop individual transmit queue of a device with multiple transmit queues.
3310 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003311static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3312{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003313 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00003314 netif_tx_stop_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003315}
3316
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003317/**
3318 * netif_subqueue_stopped - test status of subqueue
3319 * @dev: network device
3320 * @queue_index: sub queue index
3321 *
3322 * Check individual transmit queue of a device with multiple transmit queues.
3323 */
David S. Miller4d295152012-03-07 21:02:35 -05003324static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3325 u16 queue_index)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003326{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003327 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00003328
3329 return netif_tx_queue_stopped(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003330}
3331
David S. Miller4d295152012-03-07 21:02:35 -05003332static inline bool netif_subqueue_stopped(const struct net_device *dev,
3333 struct sk_buff *skb)
Pavel Emelyanov668f8952007-10-21 17:01:56 -07003334{
3335 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3336}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003337
Florian Fainelli738b35c2017-01-11 21:13:02 -08003338/**
3339 * netif_wake_subqueue - allow sending packets on subqueue
3340 * @dev: network device
3341 * @queue_index: sub queue index
3342 *
3343 * Resume individual transmit queue of a device with multiple transmit queues.
3344 */
3345static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3346{
3347 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3348
3349 netif_tx_wake_queue(txq);
3350}
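/*
 * Illustrative sketch, not part of this header: the usual per-subqueue
 * flow-control pattern in a hypothetical multiqueue driver. The xmit
 * path stops a subqueue when its ring is full; the completion path
 * wakes it once descriptors have been reclaimed.
 */
static void foo_txring_maybe_stop(struct net_device *dev, u16 qi,
				  unsigned int free_descs)
{
	if (!free_descs)
		netif_stop_subqueue(dev, qi);
}

static void foo_txring_reclaimed(struct net_device *dev, u16 qi,
				 unsigned int free_descs)
{
	if (free_descs && __netif_subqueue_stopped(dev, qi))
		netif_wake_subqueue(dev, qi);
}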
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003351
Alexander Duyck537c00d2013-01-10 08:57:02 +00003352#ifdef CONFIG_XPS
David S. Miller53af53a2013-10-08 23:07:53 -04003353int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
Joe Perchesf629d202013-09-26 14:48:15 -07003354 u16 index);
Amritha Nambiar80d19662018-06-29 21:26:41 -07003355int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3356 u16 index, bool is_rxqs_map);
3357
3358/**
3359 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3360 * @j: CPU/Rx queue index
3361 * @mask: bitmask of all cpus/rx queues
3362 * @nr_bits: number of bits in the bitmask
3363 *
3364 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
3365 */
3366static inline bool netif_attr_test_mask(unsigned long j,
3367 const unsigned long *mask,
3368 unsigned int nr_bits)
3369{
3370 cpu_max_bits_warn(j, nr_bits);
3371 return test_bit(j, mask);
3372}
3373
3374/**
3375 * netif_attr_test_online - Test for online CPU/Rx queue
3376 * @j: CPU/Rx queue index
3377 * @online_mask: bitmask for CPUs/Rx queues that are online
3378 * @nr_bits: number of bits in the bitmask
3379 *
3380 * Returns true if a CPU/Rx queue is online.
3381 */
3382static inline bool netif_attr_test_online(unsigned long j,
3383 const unsigned long *online_mask,
3384 unsigned int nr_bits)
3385{
3386 cpu_max_bits_warn(j, nr_bits);
3387
3388 if (online_mask)
3389 return test_bit(j, online_mask);
3390
3391 return (j < nr_bits);
3392}
3393
3394/**
3395 * netif_attrmask_next - get the next CPU/Rx queue in a CPU/Rx queue mask
3396 * @n: CPU/Rx queue index
3397 * @srcp: the cpumask/Rx queue mask pointer
3398 * @nr_bits: number of bits in the bitmask
3399 *
3400 * Returns >= nr_bits if no further CPUs/Rx queues are set.
3401 */
3402static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
3403 unsigned int nr_bits)
3404{
3405 /* -1 is a legal arg here. */
3406 if (n != -1)
3407 cpu_max_bits_warn(n, nr_bits);
3408
3409 if (srcp)
3410 return find_next_bit(srcp, nr_bits, n + 1);
3411
3412 return n + 1;
3413}
3414
3415/**
3416 * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
3417 * @n: CPU/Rx queue index
3418 * @src1p: the first CPUs/Rx queues mask pointer
3419 * @src2p: the second CPUs/Rx queues mask pointer
3420 * @nr_bits: number of bits in the bitmask
3421 *
3422 * Returns >= nr_bits if no further CPUs/Rx queues are set in both.
3423 */
3424static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
3425 const unsigned long *src2p,
3426 unsigned int nr_bits)
3427{
3428 /* -1 is a legal arg here. */
3429 if (n != -1)
3430 cpu_max_bits_warn(n, nr_bits);
3431
3432 if (src1p && src2p)
3433 return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
3434 else if (src1p)
3435 return find_next_bit(src1p, nr_bits, n + 1);
3436 else if (src2p)
3437 return find_next_bit(src2p, nr_bits, n + 1);
3438
3439 return n + 1;
3440}
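/*
 * Illustrative sketch, not part of this header: walking every index set
 * in the intersection of two attribute masks, in the style the XPS code
 * uses these helpers. foo_walk_masks() is a hypothetical caller.
 */
static void foo_walk_masks(const unsigned long *m1, const unsigned long *m2,
			   unsigned int nr_bits)
{
	int j = -1;	/* -1 is the documented starting argument */

	while ((j = netif_attrmask_next_and(j, m1, m2, nr_bits)) < nr_bits)
		pr_info("index %d is set in both masks\n", j);
}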
Alexander Duyck537c00d2013-01-10 08:57:02 +00003441#else
3442static inline int netif_set_xps_queue(struct net_device *dev,
Michael S. Tsirkin35735402013-10-02 09:14:06 +03003443 const struct cpumask *mask,
Alexander Duyck537c00d2013-01-10 08:57:02 +00003444 u16 index)
3445{
3446 return 0;
3447}
Krzysztof Kozlowskic9fbb2d2018-08-10 10:47:43 +02003448
3449static inline int __netif_set_xps_queue(struct net_device *dev,
3450 const unsigned long *mask,
3451 u16 index, bool is_rxqs_map)
3452{
3453 return 0;
3454}
Alexander Duyck537c00d2013-01-10 08:57:02 +00003455#endif
3456
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003457/**
3458 * netif_is_multiqueue - test if device has multiple transmit queues
3459 * @dev: network device
3460 *
3461 * Check if device has multiple transmit queues
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003462 */
David S. Miller4d295152012-03-07 21:02:35 -05003463static inline bool netif_is_multiqueue(const struct net_device *dev)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003464{
Eric Dumazeta02cec22010-09-22 20:43:57 +00003465 return dev->num_tx_queues > 1;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003466}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467
Joe Perchesf629d202013-09-26 14:48:15 -07003468int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
John Fastabendf0796d52010-07-01 13:21:57 +00003469
Michael Daltona953be52014-01-16 22:23:28 -08003470#ifdef CONFIG_SYSFS
Joe Perchesf629d202013-09-26 14:48:15 -07003471int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003472#else
3473static inline int netif_set_real_num_rx_queues(struct net_device *dev,
Jakub Kicinskic29c2eb2018-07-30 20:43:51 -07003474 unsigned int rxqs)
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003475{
Jakub Kicinskic29c2eb2018-07-30 20:43:51 -07003476 dev->real_num_rx_queues = rxqs;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003477 return 0;
3478}
3479#endif
3480
Daniel Borkmann65073a62018-01-31 12:58:56 +01003481static inline struct netdev_rx_queue *
3482__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
3483{
3484 return dev->_rx + rxq;
3485}
3486
Michael Daltona953be52014-01-16 22:23:28 -08003487#ifdef CONFIG_SYSFS
3488static inline unsigned int get_netdev_rx_queue_index(
3489 struct netdev_rx_queue *queue)
3490{
3491 struct net_device *dev = queue->dev;
3492 int index = queue - dev->_rx;
3493
3494 BUG_ON(index >= dev->num_rx_queues);
3495 return index;
3496}
3497#endif
3498
Yuval Mintz16917b82012-07-01 03:18:50 +00003499#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
Joe Perchesf629d202013-09-26 14:48:15 -07003500int netif_get_num_default_rss_queues(void);
Yuval Mintz16917b82012-07-01 03:18:50 +00003501
Eric Dumazete6247022013-12-05 04:45:08 -08003502enum skb_free_reason {
3503 SKB_REASON_CONSUMED,
3504 SKB_REASON_DROPPED,
3505};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506
Eric Dumazete6247022013-12-05 04:45:08 -08003507void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3508void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3509
3510/*
3511 * It is not allowed to call kfree_skb() or consume_skb() from hardware
3512 * interrupt context or with hardware interrupts being disabled.
3513 * (in_irq() || irqs_disabled())
3514 *
3515 * We provide four helpers that can be used in the following contexts:
3516 *
3517 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3518 * replacing kfree_skb(skb)
3519 *
3520 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3521 * Typically used in place of consume_skb(skb) in the TX completion path
3522 *
3523 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3524 * replacing kfree_skb(skb)
3525 *
3526 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
3527 * and consumed a packet. Used in place of consume_skb(skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528 */
Eric Dumazete6247022013-12-05 04:45:08 -08003529static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3530{
3531 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3532}
3533
3534static inline void dev_consume_skb_irq(struct sk_buff *skb)
3535{
3536 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3537}
3538
3539static inline void dev_kfree_skb_any(struct sk_buff *skb)
3540{
3541 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3542}
3543
3544static inline void dev_consume_skb_any(struct sk_buff *skb)
3545{
3546 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3547}
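/*
 * Illustrative sketch, not part of this header: a TX-completion interrupt
 * handler must not call kfree_skb()/consume_skb() directly, so it uses
 * the _irq variants, distinguishing drops from successful transmissions
 * to keep drop monitoring accurate. Driver details are hypothetical.
 */
static void foo_tx_irq_clean(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		dev_consume_skb_irq(skb);	/* sent fine: not a drop */
	else
		dev_kfree_skb_irq(skb);		/* counted as a drop */
}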
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548
Jason Wang7c497472017-08-11 19:41:17 +08003549void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3550int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
Joe Perchesf629d202013-09-26 14:48:15 -07003551int netif_rx(struct sk_buff *skb);
3552int netif_rx_ni(struct sk_buff *skb);
Eric W. Biederman04eb4482015-09-15 20:04:15 -05003553int netif_receive_skb(struct sk_buff *skb);
Jesper Dangaard Brouer1c601d82017-10-16 12:19:39 +02003554int netif_receive_skb_core(struct sk_buff *skb);
Edward Creef6ad8c12018-07-02 16:12:45 +01003555void netif_receive_skb_list(struct list_head *head);
Joe Perchesf629d202013-09-26 14:48:15 -07003556gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3557void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3558struct sk_buff *napi_get_frags(struct napi_struct *napi);
3559gro_result_t napi_gro_frags(struct napi_struct *napi);
Jerry Chubf5a7552014-01-07 10:23:19 -08003560struct packet_offload *gro_find_receive_by_type(__be16 type);
3561struct packet_offload *gro_find_complete_by_type(__be16 type);
Herbert Xu76620aa2009-04-16 02:02:07 -07003562
3563static inline void napi_free_frags(struct napi_struct *napi)
3564{
3565 kfree_skb(napi->skb);
3566 napi->skb = NULL;
3567}
3568
Mahesh Bandewar24b27fc2016-09-01 22:18:34 -07003569bool netdev_is_rx_handler_busy(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07003570int netdev_rx_handler_register(struct net_device *dev,
3571 rx_handler_func_t *rx_handler,
3572 void *rx_handler_data);
3573void netdev_rx_handler_unregister(struct net_device *dev);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003574
Joe Perchesf629d202013-09-26 14:48:15 -07003575bool dev_valid_name(const char *name);
Al Viro44c02a22017-10-05 12:59:44 -04003576int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
3577 bool *need_copyout);
Al Viro36fd6332017-06-26 13:19:16 -04003578int dev_ifconf(struct net *net, struct ifconf *, int);
Joe Perchesf629d202013-09-26 14:48:15 -07003579int dev_ethtool(struct net *net, struct ifreq *);
3580unsigned int dev_get_flags(const struct net_device *);
3581int __dev_change_flags(struct net_device *, unsigned int flags);
3582int dev_change_flags(struct net_device *, unsigned int);
David S. Millercb178192013-09-30 15:36:45 -04003583void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3584 unsigned int gchanges);
Joe Perchesf629d202013-09-26 14:48:15 -07003585int dev_change_name(struct net_device *, const char *);
3586int dev_set_alias(struct net_device *, const char *, size_t);
Florian Westphal6c557002017-10-02 23:50:05 +02003587int dev_get_alias(const struct net_device *, char *, size_t);
Joe Perchesf629d202013-09-26 14:48:15 -07003588int dev_change_net_namespace(struct net_device *, struct net *, const char *);
WANG Congf51048c2017-07-06 15:01:57 -07003589int __dev_set_mtu(struct net_device *, int);
Stephen Hemminger7a4c53b2018-07-27 13:43:23 -07003590int dev_set_mtu_ext(struct net_device *dev, int mtu,
3591 struct netlink_ext_ack *extack);
Joe Perchesf629d202013-09-26 14:48:15 -07003592int dev_set_mtu(struct net_device *, int);
Cong Wang6a643dd2018-01-25 18:26:22 -08003593int dev_change_tx_queue_len(struct net_device *, unsigned long);
Joe Perchesf629d202013-09-26 14:48:15 -07003594void dev_set_group(struct net_device *, int);
3595int dev_set_mac_address(struct net_device *, struct sockaddr *);
3596int dev_change_carrier(struct net_device *, bool new_carrier);
3597int dev_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01003598 struct netdev_phys_item_id *ppid);
David Aherndb24a902015-03-17 20:23:15 -06003599int dev_get_phys_port_name(struct net_device *dev,
3600 char *name, size_t len);
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07003601int dev_change_proto_down(struct net_device *dev, bool proto_down);
Steffen Klassertf53c7232017-12-20 10:41:36 +01003602struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
David S. Millerce937182014-08-30 19:22:20 -07003603struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3604 struct netdev_queue *txq, int *ret);
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02003605
Jakub Kicinskif4e63522017-11-03 13:56:16 -07003606typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02003607int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3608 int fd, u32 flags);
Jakub Kicinskia25717d2018-07-11 20:36:41 -07003609u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
3610 enum bpf_netdev_command cmd);
Jakub Kicinski84c6b862018-07-30 20:43:53 -07003611int xdp_umem_query(struct net_device *dev, u16 queue_id);
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02003612
Herbert Xua0265d22014-04-17 13:45:03 +08003613int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
Joe Perchesf629d202013-09-26 14:48:15 -07003614int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
Nikolay Aleksandrovf4b05d22016-04-28 17:59:28 +02003615bool is_skb_forwardable(const struct net_device *dev,
3616 const struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617
Martin KaFai Lau4e3264d2016-11-09 15:36:33 -08003618static __always_inline int ____dev_forward_skb(struct net_device *dev,
3619 struct sk_buff *skb)
3620{
3621 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3622 unlikely(!is_skb_forwardable(dev, skb))) {
3623 atomic_long_inc(&dev->rx_dropped);
3624 kfree_skb(skb);
3625 return NET_RX_DROP;
3626 }
3627
3628 skb_scrub_packet(skb, true);
3629 skb->priority = 0;
3630 return 0;
3631}
3632
David Ahern74b20582016-05-10 11:19:50 -07003633void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3634
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03003635extern int netdev_budget;
Matthew Whitehead7acf8a12017-04-19 12:37:10 -04003636extern unsigned int netdev_budget_usecs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637
3638/* Called by rtnetlink.c:rtnl_unlock() */
Joe Perchesf629d202013-09-26 14:48:15 -07003639void netdev_run_todo(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003641/**
3642 * dev_put - release reference to device
3643 * @dev: network device
3644 *
Benjamin Thery9ef44292007-10-10 21:18:17 -07003645 * Release reference to device to allow it to be freed.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003646 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647static inline void dev_put(struct net_device *dev)
3648{
Christoph Lameter933393f2011-12-22 11:58:51 -06003649 this_cpu_dec(*dev->pcpu_refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650}
3651
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003652/**
3653 * dev_hold - get reference to device
3654 * @dev: network device
3655 *
Benjamin Thery9ef44292007-10-10 21:18:17 -07003656 * Hold reference to device to keep it from being freed.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003657 */
Stephen Hemminger15333062006-03-20 22:32:28 -08003658static inline void dev_hold(struct net_device *dev)
3659{
Christoph Lameter933393f2011-12-22 11:58:51 -06003660 this_cpu_inc(*dev->pcpu_refcnt);
Stephen Hemminger15333062006-03-20 22:32:28 -08003661}
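/*
 * Illustrative sketch, not part of this header: code that stashes a
 * struct net_device pointer beyond the current RCU section must balance
 * dev_hold() with dev_put(), or the device can never be unregistered.
 * struct foo_binding is hypothetical.
 */
struct foo_binding {
	struct net_device *dev;	/* holds a reference while bound */
};

static void foo_bind(struct foo_binding *b, struct net_device *dev)
{
	dev_hold(dev);
	b->dev = dev;
}

static void foo_unbind(struct foo_binding *b)
{
	dev_put(b->dev);
	b->dev = NULL;
}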
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662
3663/* Carrier loss detection, dial on demand. The functions netif_carrier_on
3664 * and _off may be called from IRQ context, but it is the caller
3665 * who is responsible for serializing these calls.
Stefan Rompfb00055a2006-03-20 17:09:11 -08003666 *
3667 * The name carrier is inappropriate, these functions should really be
3668 * called netif_lowerlayer_*() because they represent the state of any
3669 * kind of lower layer not just hardware media.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 */
3671
Joe Perchesf629d202013-09-26 14:48:15 -07003672void linkwatch_init_dev(struct net_device *dev);
3673void linkwatch_fire_event(struct net_device *dev);
3674void linkwatch_forget_dev(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003676/**
3677 * netif_carrier_ok - test if carrier present
3678 * @dev: network device
3679 *
3680 * Check if carrier is present on device
3681 */
David S. Miller4d295152012-03-07 21:02:35 -05003682static inline bool netif_carrier_ok(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683{
3684 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3685}
3686
Joe Perchesf629d202013-09-26 14:48:15 -07003687unsigned long dev_trans_start(struct net_device *dev);
Eric Dumazet9d214932009-05-17 20:55:16 -07003688
Joe Perchesf629d202013-09-26 14:48:15 -07003689void __netdev_watchdog_up(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690
Joe Perchesf629d202013-09-26 14:48:15 -07003691void netif_carrier_on(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692
Joe Perchesf629d202013-09-26 14:48:15 -07003693void netif_carrier_off(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003695/**
3696 * netif_dormant_on - mark device as dormant.
3697 * @dev: network device
3698 *
3699 * Mark device as dormant (as per RFC2863).
3700 *
3701 * The dormant state indicates that the relevant interface is not
3702 * actually in a condition to pass packets (i.e., it is not 'up') but is
3703 * in a "pending" state, waiting for some external event. For "on-
3704 * demand" interfaces, this new state identifies the situation where the
3705 * interface is waiting for events to place it in the up state.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003706 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08003707static inline void netif_dormant_on(struct net_device *dev)
3708{
3709 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
3710 linkwatch_fire_event(dev);
3711}
3712
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003713/**
3714 * netif_dormant_off - set device as not dormant.
3715 * @dev: network device
3716 *
3717 * Device is not in dormant state.
3718 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08003719static inline void netif_dormant_off(struct net_device *dev)
3720{
3721 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
3722 linkwatch_fire_event(dev);
3723}
3724
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003725/**
Zhang Shengju8ecbc402017-04-26 11:05:12 +08003726 * netif_dormant - test if device is dormant
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003727 * @dev: network device
3728 *
Zhang Shengju8ecbc402017-04-26 11:05:12 +08003729 * Check if device is dormant.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003730 */
David S. Miller4d295152012-03-07 21:02:35 -05003731static inline bool netif_dormant(const struct net_device *dev)
Stefan Rompfb00055a2006-03-20 17:09:11 -08003732{
3733 return test_bit(__LINK_STATE_DORMANT, &dev->state);
3734}
3735
3736
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003737/**
3738 * netif_oper_up - test if device is operational
3739 * @dev: network device
3740 *
3741 * Check if carrier is operational
3742 */
David S. Miller4d295152012-03-07 21:02:35 -05003743static inline bool netif_oper_up(const struct net_device *dev)
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08003744{
Stefan Rompfb00055a2006-03-20 17:09:11 -08003745 return (dev->operstate == IF_OPER_UP ||
3746 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
3747}
3748
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003749/**
3750 * netif_device_present - is device available or removed
3751 * @dev: network device
3752 *
3753 * Check if the device has not been removed from the system.
3754 */
David S. Miller4d295152012-03-07 21:02:35 -05003755static inline bool netif_device_present(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003756{
3757 return test_bit(__LINK_STATE_PRESENT, &dev->state);
3758}
3759
Joe Perchesf629d202013-09-26 14:48:15 -07003760void netif_device_detach(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761
Joe Perchesf629d202013-09-26 14:48:15 -07003762void netif_device_attach(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763
3764/*
3765 * Network interface message level settings
3766 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003767
3768enum {
3769 NETIF_MSG_DRV = 0x0001,
3770 NETIF_MSG_PROBE = 0x0002,
3771 NETIF_MSG_LINK = 0x0004,
3772 NETIF_MSG_TIMER = 0x0008,
3773 NETIF_MSG_IFDOWN = 0x0010,
3774 NETIF_MSG_IFUP = 0x0020,
3775 NETIF_MSG_RX_ERR = 0x0040,
3776 NETIF_MSG_TX_ERR = 0x0080,
3777 NETIF_MSG_TX_QUEUED = 0x0100,
3778 NETIF_MSG_INTR = 0x0200,
3779 NETIF_MSG_TX_DONE = 0x0400,
3780 NETIF_MSG_RX_STATUS = 0x0800,
3781 NETIF_MSG_PKTDATA = 0x1000,
3782 NETIF_MSG_HW = 0x2000,
3783 NETIF_MSG_WOL = 0x4000,
3784};
3785
3786#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
3787#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
3788#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
3789#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
3790#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
3791#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
3792#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
3793#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
3794#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
3795#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
3796#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
3797#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
3798#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
3799#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
3800#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
3801
3802static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3803{
3804 /* use default */
3805 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3806 return default_msg_enable_bits;
3807 if (debug_value == 0) /* no output */
3808 return 0;
3809 /* set low N bits */
3810 return (1 << debug_value) - 1;
3811}
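/*
 * Illustrative sketch, not part of this header: a driver typically feeds
 * a "debug" module parameter (-1 meaning "use the defaults") through
 * netif_msg_init() at probe time, then gates its logging with the
 * netif_msg_*() tests above. The foo_* names are hypothetical.
 */
struct foo_priv {
	u32 msg_enable;			/* bitmap of NETIF_MSG_* values */
};

static int foo_debug = -1;		/* module parameter in a real driver */

static void foo_init_msglevel(struct foo_priv *priv)
{
	priv->msg_enable = netif_msg_init(foo_debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	if (netif_msg_probe(priv))
		pr_info("foo: probe messages enabled\n");
}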
3812
David S. Millerc773e842008-07-08 23:13:53 -07003813static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
Herbert Xu932ff272006-06-09 12:20:56 -07003814{
David S. Millerc773e842008-07-08 23:13:53 -07003815 spin_lock(&txq->_xmit_lock);
3816 txq->xmit_lock_owner = cpu;
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07003817}
3818
Michael S. Tsirkin5a717f42016-11-24 07:04:08 +02003819static inline bool __netif_tx_acquire(struct netdev_queue *txq)
3820{
3821 __acquire(&txq->_xmit_lock);
3822 return true;
3823}
3824
3825static inline void __netif_tx_release(struct netdev_queue *txq)
3826{
3827 __release(&txq->_xmit_lock);
3828}
3829
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003830static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3831{
3832 spin_lock_bh(&txq->_xmit_lock);
3833 txq->xmit_lock_owner = smp_processor_id();
3834}
3835
David S. Miller4d295152012-03-07 21:02:35 -05003836static inline bool __netif_tx_trylock(struct netdev_queue *txq)
David S. Millerc773e842008-07-08 23:13:53 -07003837{
David S. Miller4d295152012-03-07 21:02:35 -05003838 bool ok = spin_trylock(&txq->_xmit_lock);
David S. Millerc773e842008-07-08 23:13:53 -07003839 if (likely(ok))
3840 txq->xmit_lock_owner = smp_processor_id();
3841 return ok;
Herbert Xu932ff272006-06-09 12:20:56 -07003842}
3843
David S. Millerc773e842008-07-08 23:13:53 -07003844static inline void __netif_tx_unlock(struct netdev_queue *txq)
3845{
3846 txq->xmit_lock_owner = -1;
3847 spin_unlock(&txq->_xmit_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07003848}
3849
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003850static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3851{
3852 txq->xmit_lock_owner = -1;
3853 spin_unlock_bh(&txq->_xmit_lock);
3854}
3855
Eric Dumazet08baf562009-05-25 22:58:01 -07003856static inline void txq_trans_update(struct netdev_queue *txq)
3857{
3858 if (txq->xmit_lock_owner != -1)
3859 txq->trans_start = jiffies;
3860}
3861
Florian Westphalba162f82016-05-03 16:31:00 +02003862/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
3863static inline void netif_trans_update(struct net_device *dev)
3864{
Florian Westphal9b366272016-05-03 16:33:14 +02003865 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
3866
3867 if (txq->trans_start != jiffies)
3868 txq->trans_start = jiffies;
Florian Westphalba162f82016-05-03 16:31:00 +02003869}
3870
David S. Millerc3f26a22008-07-31 16:58:50 -07003871/**
3872 * netif_tx_lock - grab network device transmit lock
3873 * @dev: network device
David S. Millerc3f26a22008-07-31 16:58:50 -07003874 *
3875 * Get network device transmit lock
3876 */
3877static inline void netif_tx_lock(struct net_device *dev)
3878{
3879 unsigned int i;
3880 int cpu;
3881
3882 spin_lock(&dev->tx_global_lock);
3883 cpu = smp_processor_id();
3884 for (i = 0; i < dev->num_tx_queues; i++) {
3885 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3886
3887 /* We are the only thread of execution doing a
3888 * freeze, but we have to grab the _xmit_lock in
3889 * order to synchronize with threads which are in
3890 * the ->hard_start_xmit() handler and already
3891 * checked the frozen bit.
3892 */
3893 __netif_tx_lock(txq, cpu);
3894 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3895 __netif_tx_unlock(txq);
3896 }
3897}
3898
3899static inline void netif_tx_lock_bh(struct net_device *dev)
3900{
3901 local_bh_disable();
3902 netif_tx_lock(dev);
3903}
3904
Herbert Xu932ff272006-06-09 12:20:56 -07003905static inline void netif_tx_unlock(struct net_device *dev)
3906{
David S. Millere8a04642008-07-17 00:34:19 -07003907 unsigned int i;
David S. Millerc773e842008-07-08 23:13:53 -07003908
David S. Millere8a04642008-07-17 00:34:19 -07003909 for (i = 0; i < dev->num_tx_queues; i++) {
3910 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
David S. Millere8a04642008-07-17 00:34:19 -07003911
David S. Millerc3f26a22008-07-31 16:58:50 -07003912 /* No need to grab the _xmit_lock here. If the
3913 * queue is not stopped for another reason, we
3914 * force a schedule.
3915 */
3916 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00003917 netif_schedule_queue(txq);
David S. Millerc3f26a22008-07-31 16:58:50 -07003918 }
3919 spin_unlock(&dev->tx_global_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07003920}
3921
3922static inline void netif_tx_unlock_bh(struct net_device *dev)
3923{
David S. Millere8a04642008-07-17 00:34:19 -07003924 netif_tx_unlock(dev);
3925 local_bh_enable();
Herbert Xu932ff272006-06-09 12:20:56 -07003926}
3927
David S. Millerc773e842008-07-08 23:13:53 -07003928#define HARD_TX_LOCK(dev, txq, cpu) { \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07003929 if ((dev->features & NETIF_F_LLTX) == 0) { \
David S. Millerc773e842008-07-08 23:13:53 -07003930 __netif_tx_lock(txq, cpu); \
Michael S. Tsirkin5a717f42016-11-24 07:04:08 +02003931 } else { \
3932 __netif_tx_acquire(txq); \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07003933 } \
3934}
3935
Eric W. Biederman5efeac42014-03-27 15:42:20 -07003936#define HARD_TX_TRYLOCK(dev, txq) \
3937 (((dev->features & NETIF_F_LLTX) == 0) ? \
3938 __netif_tx_trylock(txq) : \
Michael S. Tsirkin5a717f42016-11-24 07:04:08 +02003939 __netif_tx_acquire(txq))
Eric W. Biederman5efeac42014-03-27 15:42:20 -07003940
David S. Millerc773e842008-07-08 23:13:53 -07003941#define HARD_TX_UNLOCK(dev, txq) { \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07003942 if ((dev->features & NETIF_F_LLTX) == 0) { \
David S. Millerc773e842008-07-08 23:13:53 -07003943 __netif_tx_unlock(txq); \
Michael S. Tsirkin5a717f42016-11-24 07:04:08 +02003944 } else { \
3945 __netif_tx_release(txq); \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07003946 } \
3947}
3948
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949static inline void netif_tx_disable(struct net_device *dev)
3950{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003951 unsigned int i;
David S. Millerc3f26a22008-07-31 16:58:50 -07003952 int cpu;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003953
David S. Millerc3f26a22008-07-31 16:58:50 -07003954 local_bh_disable();
3955 cpu = smp_processor_id();
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003956 for (i = 0; i < dev->num_tx_queues; i++) {
3957 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
David S. Millerc3f26a22008-07-31 16:58:50 -07003958
3959 __netif_tx_lock(txq, cpu);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003960 netif_tx_stop_queue(txq);
David S. Millerc3f26a22008-07-31 16:58:50 -07003961 __netif_tx_unlock(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003962 }
David S. Millerc3f26a22008-07-31 16:58:50 -07003963 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964}
3965
David S. Millere308a5d2008-07-15 00:13:44 -07003966static inline void netif_addr_lock(struct net_device *dev)
3967{
3968 spin_lock(&dev->addr_list_lock);
3969}
3970
Jiri Pirko2429f7a2012-01-09 06:36:54 +00003971static inline void netif_addr_lock_nested(struct net_device *dev)
3972{
Vlad Yasevich25175ba2014-05-16 17:04:54 -04003973 int subclass = SINGLE_DEPTH_NESTING;
3974
3975 if (dev->netdev_ops->ndo_get_lock_subclass)
3976 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
3977
3978 spin_lock_nested(&dev->addr_list_lock, subclass);
Jiri Pirko2429f7a2012-01-09 06:36:54 +00003979}
3980
David S. Millere308a5d2008-07-15 00:13:44 -07003981static inline void netif_addr_lock_bh(struct net_device *dev)
3982{
3983 spin_lock_bh(&dev->addr_list_lock);
3984}
3985
3986static inline void netif_addr_unlock(struct net_device *dev)
3987{
3988 spin_unlock(&dev->addr_list_lock);
3989}
3990
3991static inline void netif_addr_unlock_bh(struct net_device *dev)
3992{
3993 spin_unlock_bh(&dev->addr_list_lock);
3994}
3995
Jiri Pirkof001fde2009-05-05 02:48:28 +00003996/*
Jiri Pirko31278e72009-06-17 01:12:19 +00003997 * dev_addrs walker. Should be used only for read access. Call with
Jiri Pirkof001fde2009-05-05 02:48:28 +00003998 * rcu_read_lock held.
3999 */
4000#define for_each_dev_addr(dev, ha) \
Jiri Pirko31278e72009-06-17 01:12:19 +00004001 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00004002
Linus Torvalds1da177e2005-04-16 15:20:36 -07004003/* These functions live elsewhere (drivers/net/net_init.c, but related) */
4004
Joe Perchesf629d202013-09-26 14:48:15 -07004005void ether_setup(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004006
4007/* Support for loadable net-drivers */
Joe Perchesf629d202013-09-26 14:48:15 -07004008struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02004009 unsigned char name_assign_type,
Joe Perchesf629d202013-09-26 14:48:15 -07004010 void (*setup)(struct net_device *),
4011 unsigned int txqs, unsigned int rxqs);
Cong Wang0ad646c2017-10-13 11:58:53 -07004012int dev_get_valid_name(struct net *net, struct net_device *dev,
4013 const char *name);
4014
Tom Gundersenc835a672014-07-14 16:37:24 +02004015#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4016 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
Tom Herbert36909ea2011-01-09 19:36:31 +00004017
Tom Gundersenc835a672014-07-14 16:37:24 +02004018#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4019 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4020 count)
Tom Herbert36909ea2011-01-09 19:36:31 +00004021
Joe Perchesf629d202013-09-26 14:48:15 -07004022int register_netdev(struct net_device *dev);
4023void unregister_netdev(struct net_device *dev);
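/*
 * Illustrative sketch, not part of this header: the usual lifecycle built
 * from the declarations above - allocate an Ethernet-style netdev,
 * register it, and tear it down in reverse order. foo_* is hypothetical;
 * error handling is reduced to the essentials.
 */
static struct net_device *foo_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "foo%d", NET_NAME_UNKNOWN, ether_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

static void foo_destroy(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}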
Jiri Pirkof001fde2009-05-05 02:48:28 +00004024
Jiri Pirko22bedad32010-04-01 21:22:57 +00004025/* General hardware address lists handling functions */
Joe Perchesf629d202013-09-26 14:48:15 -07004026int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4027 struct netdev_hw_addr_list *from_list, int addr_len);
4028void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4029 struct netdev_hw_addr_list *from_list, int addr_len);
Alexander Duyck670e5b82014-05-28 18:44:46 -07004030int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4031 struct net_device *dev,
4032 int (*sync)(struct net_device *, const unsigned char *),
4033 int (*unsync)(struct net_device *,
4034 const unsigned char *));
4035void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4036 struct net_device *dev,
4037 int (*unsync)(struct net_device *,
4038 const unsigned char *));
Joe Perchesf629d202013-09-26 14:48:15 -07004039void __hw_addr_init(struct netdev_hw_addr_list *list);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004040
Jiri Pirkof001fde2009-05-05 02:48:28 +00004041/* Functions used for device addresses handling */
Joe Perchesf629d202013-09-26 14:48:15 -07004042int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4043 unsigned char addr_type);
4044int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4045 unsigned char addr_type);
Joe Perchesf629d202013-09-26 14:48:15 -07004046void dev_addr_flush(struct net_device *dev);
4047int dev_addr_init(struct net_device *dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00004048
4049/* Functions used for unicast addresses handling */
Joe Perchesf629d202013-09-26 14:48:15 -07004050int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4051int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4052int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4053int dev_uc_sync(struct net_device *to, struct net_device *from);
4054int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4055void dev_uc_unsync(struct net_device *to, struct net_device *from);
4056void dev_uc_flush(struct net_device *dev);
4057void dev_uc_init(struct net_device *dev);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004058
Alexander Duyck670e5b82014-05-28 18:44:46 -07004059/**
4060 * __dev_uc_sync - Synchronize device's unicast list
4061 * @dev: device to sync
4062 * @sync: function to call if address should be added
4063 * @unsync: function to call if address should be removed
4064 *
4065 * Add newly added addresses to the interface, and release
4066 * addresses that have been deleted.
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05004067 */
Alexander Duyck670e5b82014-05-28 18:44:46 -07004068static inline int __dev_uc_sync(struct net_device *dev,
4069 int (*sync)(struct net_device *,
4070 const unsigned char *),
4071 int (*unsync)(struct net_device *,
4072 const unsigned char *))
4073{
4074 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
4075}
4076
4077/**
Masanari Iidae793c0f2014-09-04 23:44:36 +09004078 * __dev_uc_unsync - Remove synchronized addresses from device
Alexander Duyck670e5b82014-05-28 18:44:46 -07004079 * @dev: device to sync
4080 * @unsync: function to call if address should be removed
4081 *
4082 * Remove all addresses that were added to the device by dev_uc_sync().
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05004083 */
Alexander Duyck670e5b82014-05-28 18:44:46 -07004084static inline void __dev_uc_unsync(struct net_device *dev,
4085 int (*unsync)(struct net_device *,
4086 const unsigned char *))
4087{
4088 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
4089}
4090
Jiri Pirko22bedad32010-04-01 21:22:57 +00004091/* Functions used for multicast addresses handling */
Joe Perchesf629d202013-09-26 14:48:15 -07004092int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4093int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4094int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4095int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4096int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4097int dev_mc_sync(struct net_device *to, struct net_device *from);
4098int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4099void dev_mc_unsync(struct net_device *to, struct net_device *from);
4100void dev_mc_flush(struct net_device *dev);
4101void dev_mc_init(struct net_device *dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08004102
Alexander Duyck670e5b82014-05-28 18:44:46 -07004103/**
4104 * __dev_mc_sync - Synchronize device's multicast list
4105 * @dev: device to sync
4106 * @sync: function to call if address should be added
4107 * @unsync: function to call if address should be removed
4108 *
4109 * Add newly added addresses to the interface, and release
4110 * addresses that have been deleted.
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05004111 */
Alexander Duyck670e5b82014-05-28 18:44:46 -07004112static inline int __dev_mc_sync(struct net_device *dev,
4113 int (*sync)(struct net_device *,
4114 const unsigned char *),
4115 int (*unsync)(struct net_device *,
4116 const unsigned char *))
4117{
4118 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
4119}
4120
4121/**
Masanari Iidae793c0f2014-09-04 23:44:36 +09004122 * __dev_mc_unsync - Remove synchronized addresses from device
Alexander Duyck670e5b82014-05-28 18:44:46 -07004123 * @dev: device to sync
4124 * @unsync: function to call if address should be removed
4125 *
4126 * Remove all addresses that were added to the device by dev_mc_sync().
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05004127 */
Alexander Duyck670e5b82014-05-28 18:44:46 -07004128static inline void __dev_mc_unsync(struct net_device *dev,
4129 int (*unsync)(struct net_device *,
4130 const unsigned char *))
4131{
4132 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
4133}
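/*
 * Illustrative sketch, not part of this header: a driver's
 * ndo_set_rx_mode() handler typically drives __dev_uc_sync() and
 * __dev_mc_sync() with callbacks that program one hardware filter entry
 * at a time. The foo_hw_*_filter() stubs stand in for real register
 * writes and are assumptions.
 */
static int foo_hw_add_filter(struct net_device *dev, const unsigned char *addr)
{
	return 0;	/* a real driver would program a filter entry */
}

static int foo_hw_del_filter(struct net_device *dev, const unsigned char *addr)
{
	return 0;	/* a real driver would clear the filter entry */
}

static void foo_set_rx_mode(struct net_device *dev)
{
	/* the core calls ndo_set_rx_mode() with the addr_list lock held */
	__dev_uc_sync(dev, foo_hw_add_filter, foo_hw_del_filter);
	__dev_mc_sync(dev, foo_hw_add_filter, foo_hw_del_filter);
}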
4134
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135/* Functions used for secondary unicast and multicast support */
Joe Perchesf629d202013-09-26 14:48:15 -07004136void dev_set_rx_mode(struct net_device *dev);
4137void __dev_set_rx_mode(struct net_device *dev);
4138int dev_set_promiscuity(struct net_device *dev, int inc);
4139int dev_set_allmulti(struct net_device *dev, int inc);
4140void netdev_state_change(struct net_device *dev);
4141void netdev_notify_peers(struct net_device *dev);
4142void netdev_features_change(struct net_device *dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08004143/* Load a device via the kmod */
Joe Perchesf629d202013-09-26 14:48:15 -07004144void dev_load(struct net *net, const char *name);
4145struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4146 struct rtnl_link_stats64 *storage);
4147void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4148 const struct net_device_stats *netdev_stats);
Herbert Xufb286bb2005-11-10 13:01:24 -08004149
4150extern int netdev_max_backlog;
Eric Dumazet3b098e22010-05-15 23:57:10 -07004151extern int netdev_tstamp_prequeue;
Herbert Xufb286bb2005-11-10 13:01:24 -08004152extern int weight_p;
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01004153extern int dev_weight_rx_bias;
4154extern int dev_weight_tx_bias;
4155extern int dev_rx_weight;
4156extern int dev_tx_weight;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004157
Joe Perchesf629d202013-09-26 14:48:15 -07004158bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
Vlad Yasevich44a40852014-05-16 17:20:38 -04004159struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4160 struct list_head **iter);
Joe Perchesf629d202013-09-26 14:48:15 -07004161struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4162 struct list_head **iter);
Veaceslav Falico8b5be852013-08-28 23:25:08 +02004163
4164/* iterate through upper list, must be called under RCU read lock */
Vlad Yasevich44a40852014-05-16 17:20:38 -04004165#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
4166 for (iter = &(dev)->adj_list.upper, \
4167 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4168 updev; \
4169 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4170
David Ahern1a3f0602016-10-17 19:15:44 -07004171int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4172 int (*fn)(struct net_device *upper_dev,
4173 void *data),
4174 void *data);
4175
4176bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4177 struct net_device *upper_dev);
4178
Ido Schimmel25cc72a2017-09-01 10:52:31 +02004179bool netdev_has_any_upper_dev(struct net_device *dev);
4180
Joe Perchesf629d202013-09-26 14:48:15 -07004181void *netdev_lower_get_next_private(struct net_device *dev,
4182 struct list_head **iter);
4183void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4184 struct list_head **iter);
Veaceslav Falico31088a12013-09-25 09:20:12 +02004185
4186#define netdev_for_each_lower_private(dev, priv, iter) \
4187 for (iter = (dev)->adj_list.lower.next, \
4188 priv = netdev_lower_get_next_private(dev, &(iter)); \
4189 priv; \
4190 priv = netdev_lower_get_next_private(dev, &(iter)))
4191
4192#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
4193 for (iter = &(dev)->adj_list.lower, \
4194 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4195 priv; \
4196 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4197
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04004198void *netdev_lower_get_next(struct net_device *dev,
4199 struct list_head **iter);
Jiri Pirko7ce856a2016-07-04 08:23:12 +02004200
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04004201#define netdev_for_each_lower_dev(dev, ldev, iter) \
Nikolay Aleksandrovcfdd28b2016-02-17 18:00:31 +01004202 for (iter = (dev)->adj_list.lower.next, \
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04004203 ldev = netdev_lower_get_next(dev, &(iter)); \
4204 ldev; \
4205 ldev = netdev_lower_get_next(dev, &(iter)))
4206
Jiri Pirko7ce856a2016-07-04 08:23:12 +02004207struct net_device *netdev_all_lower_get_next(struct net_device *dev,
4208 struct list_head **iter);
4209struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
4210 struct list_head **iter);
4211
David Ahern1a3f0602016-10-17 19:15:44 -07004212int netdev_walk_all_lower_dev(struct net_device *dev,
4213 int (*fn)(struct net_device *lower_dev,
4214 void *data),
4215 void *data);
4216int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
4217 int (*fn)(struct net_device *lower_dev,
4218 void *data),
4219 void *data);
4220
Joe Perchesf629d202013-09-26 14:48:15 -07004221void *netdev_adjacent_get_private(struct list_head *adj_list);
dingtianhonge001bfa2013-12-13 10:19:55 +08004222void *netdev_lower_get_first_private_rcu(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07004223struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
4224struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
David Ahern42ab19e2017-10-04 17:48:47 -07004225int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
4226 struct netlink_ext_ack *extack);
Joe Perchesf629d202013-09-26 14:48:15 -07004227int netdev_master_upper_dev_link(struct net_device *dev,
Jiri Pirko6dffb042015-12-03 12:12:10 +01004228 struct net_device *upper_dev,
David Ahern42ab19e2017-10-04 17:48:47 -07004229 void *upper_priv, void *upper_info,
4230 struct netlink_ext_ack *extack);
Joe Perchesf629d202013-09-26 14:48:15 -07004231void netdev_upper_dev_unlink(struct net_device *dev,
4232 struct net_device *upper_dev);
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01004233void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
Joe Perchesf629d202013-09-26 14:48:15 -07004234void *netdev_lower_dev_get_private(struct net_device *dev,
4235 struct net_device *lower_dev);
Jiri Pirko04d48262015-12-03 12:12:15 +01004236void netdev_lower_state_changed(struct net_device *lower_dev,
4237 void *lower_state_info);
Eric Dumazet960fb622014-11-16 06:23:05 -08004238
4239/* RSS keys are 40 or 52 bytes long */
4240#define NETDEV_RSS_KEY_LEN 52
Kim Jonesba905f52016-02-02 03:51:16 +00004241extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
Eric Dumazet960fb622014-11-16 06:23:05 -08004242void netdev_rss_key_fill(void *buffer, size_t len);
4243
Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02004244int dev_get_nest_level(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07004245int skb_checksum_help(struct sk_buff *skb);
Davide Carattib72b5bf2017-05-18 15:44:38 +02004246int skb_crc32c_csum_help(struct sk_buff *skb);
Davide Caratti43c26a12017-05-18 15:44:41 +02004247int skb_csum_hwoffload_help(struct sk_buff *skb,
4248 const netdev_features_t features);
4249
Joe Perchesf629d202013-09-26 14:48:15 -07004250struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
4251 netdev_features_t features, bool tx_path);
4252struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
4253 netdev_features_t features);
Cong Wang12b00042013-02-05 16:36:38 +00004254
Moni Shoua61bd3852015-02-03 16:48:29 +02004255struct netdev_bonding_info {
4256 ifslave slave;
4257 ifbond master;
4258};
4259
4260struct netdev_notifier_bonding_info {
4261 struct netdev_notifier_info info; /* must be first */
4262 struct netdev_bonding_info bonding_info;
4263};
4264
4265void netdev_bonding_info_change(struct net_device *dev,
4266 struct netdev_bonding_info *bonding_info);
4267
Cong Wang12b00042013-02-05 16:36:38 +00004268static inline
4269struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
4270{
4271 return __skb_gso_segment(skb, features, true);
4272}
Vlad Yasevich53d64712014-03-27 17:26:18 -04004273__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00004274
4275static inline bool can_checksum_protocol(netdev_features_t features,
4276 __be16 protocol)
4277{
Tom Herbertc8cd0982015-12-14 11:19:44 -08004278 if (protocol == htons(ETH_P_FCOE))
4279 return !!(features & NETIF_F_FCOE_CRC);
4280
4281 /* Assume this is an IP checksum (not SCTP CRC) */
4282
4283 if (features & NETIF_F_HW_CSUM) {
4284 /* Can checksum everything */
4285 return true;
4286 }
4287
4288 switch (protocol) {
4289 case htons(ETH_P_IP):
4290 return !!(features & NETIF_F_IP_CSUM);
4291 case htons(ETH_P_IPV6):
4292 return !!(features & NETIF_F_IPV6_CSUM);
4293 default:
4294 return false;
4295 }
Pravin B Shelarec5f0612013-03-07 09:28:01 +00004296}
Cong Wang12b00042013-02-05 16:36:38 +00004297
Herbert Xufb286bb2005-11-10 13:01:24 -08004298#ifdef CONFIG_BUG
Joe Perchesf629d202013-09-26 14:48:15 -07004299void netdev_rx_csum_fault(struct net_device *dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08004300#else
4301static inline void netdev_rx_csum_fault(struct net_device *dev)
4302{
4303}
4304#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305/* rx skb timestamps */
Joe Perchesf629d202013-09-26 14:48:15 -07004306void net_enable_timestamp(void);
4307void net_disable_timestamp(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03004309#ifdef CONFIG_PROC_FS
Joe Perchesf629d202013-09-26 14:48:15 -07004310int __init dev_proc_init(void);
Cong Wang900ff8c2013-02-18 19:20:33 +00004311#else
4312#define dev_proc_init() 0
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03004313#endif
4314
David S. Miller47982482014-08-22 16:21:53 -07004315static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
David S. Millerfa2dbdc2014-08-29 21:55:22 -07004316 struct sk_buff *skb, struct net_device *dev,
4317 bool more)
David S. Miller47982482014-08-22 16:21:53 -07004318{
David S. Millerfa2dbdc2014-08-29 21:55:22 -07004319 skb->xmit_more = more ? 1 : 0;
David S. Miller0b725a22014-08-25 15:51:53 -07004320 return ops->ndo_start_xmit(skb, dev);
David S. Miller47982482014-08-22 16:21:53 -07004321}
4322
David S. Miller10b3ad82014-08-29 21:07:24 -07004323static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
David S. Millerfa2dbdc2014-08-29 21:55:22 -07004324 struct netdev_queue *txq, bool more)
David S. Miller47982482014-08-22 16:21:53 -07004325{
4326 const struct net_device_ops *ops = dev->netdev_ops;
David S. Miller10b3ad82014-08-29 21:07:24 -07004327 int rc;
David S. Miller47982482014-08-22 16:21:53 -07004328
David S. Millerfa2dbdc2014-08-29 21:55:22 -07004329 rc = __netdev_start_xmit(ops, skb, dev, more);
David S. Miller10b3ad82014-08-29 21:07:24 -07004330 if (rc == NETDEV_TX_OK)
4331 txq_trans_update(txq);
4332
4333 return rc;
David S. Miller47982482014-08-22 16:21:53 -07004334}
4335
stephen hemmingerb793dc52017-08-18 13:46:20 -07004336int netdev_class_create_file_ns(const struct class_attribute *class_attr,
Linus Torvalds42a2d922013-11-13 17:40:34 +09004337 const void *ns);
stephen hemmingerb793dc52017-08-18 13:46:20 -07004338void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
Linus Torvalds42a2d922013-11-13 17:40:34 +09004339 const void *ns);
Tejun Heo58292cbe2013-09-11 22:29:04 -04004340
stephen hemmingerb793dc52017-08-18 13:46:20 -07004341static inline int netdev_class_create_file(const struct class_attribute *class_attr)
Tejun Heo58292cbe2013-09-11 22:29:04 -04004342{
4343 return netdev_class_create_file_ns(class_attr, NULL);
4344}
4345
stephen hemmingerb793dc52017-08-18 13:46:20 -07004346static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
Tejun Heo58292cbe2013-09-11 22:29:04 -04004347{
4348 netdev_class_remove_file_ns(class_attr, NULL);
4349}
Jay Vosburghb8a97872008-06-13 18:12:04 -07004350
stephen hemminger737aec52017-08-18 13:46:22 -07004351extern const struct kobj_ns_type_operations net_ns_type_operations;
Johannes Berg04600792010-08-05 17:45:15 +02004352
Joe Perchesf629d202013-09-26 14:48:15 -07004353const char *netdev_drivername(const struct net_device *dev);
Arjan van de Ven6579e572008-07-21 13:31:48 -07004354
Joe Perchesf629d202013-09-26 14:48:15 -07004355void linkwatch_run_queue(void);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03004356
Michal Kubečekda081432014-05-20 08:29:25 +02004357static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4358 netdev_features_t f2)
4359{
Tom Herbertc8cd0982015-12-14 11:19:44 -08004360 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4361 if (f1 & NETIF_F_HW_CSUM)
Tom Herbertb6a0e722016-01-11 10:19:10 -08004362 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
Tom Herbertc8cd0982015-12-14 11:19:44 -08004363 else
Tom Herbertb6a0e722016-01-11 10:19:10 -08004364 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
Tom Herbertc8cd0982015-12-14 11:19:44 -08004365 }
Michal Kubečekda081432014-05-20 08:29:25 +02004366
Tom Herbertc8cd0982015-12-14 11:19:44 -08004367 return f1 & f2;
Michal Kubečekda081432014-05-20 08:29:25 +02004368}
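/*
 * Illustrative sketch, not part of this header: how a stacked device
 * (bond, vlan, team) might combine its own features with a lower
 * device's, letting the NETIF_F_HW_CSUM equivalence logic above handle
 * the checksum-offload flags. foo_stacked_features() is hypothetical.
 */
static netdev_features_t foo_stacked_features(const struct net_device *upper,
					      const struct net_device *lower)
{
	return netdev_intersect_features(upper->features, lower->features);
}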
4369
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004370static inline netdev_features_t netdev_get_wanted_features(
4371 struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00004372{
4373 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4374}
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004375netdev_features_t netdev_increment_features(netdev_features_t all,
4376 netdev_features_t one, netdev_features_t mask);
Eric Dumazetb0ce3502013-05-16 07:34:53 +00004377
4378/* Allow TSO to be used on stacked devices:
4379 * performing the GSO segmentation before the last device
4380 * is a performance improvement.
4381 */
4382static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4383 netdev_features_t mask)
4384{
4385 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4386}
4387
Michał Mirosław6cb6a272011-04-02 22:48:47 -07004388int __netdev_update_features(struct net_device *dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00004389void netdev_update_features(struct net_device *dev);
Michał Mirosławafe12cc2011-05-07 03:22:17 +00004390void netdev_change_features(struct net_device *dev);
Herbert Xu7f353bf2007-08-10 15:47:58 -07004391
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08004392void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4393 struct net_device *dev);
4394
Toshiaki Makitae38f3022015-03-27 14:31:13 +09004395netdev_features_t passthru_features_check(struct sk_buff *skb,
4396 struct net_device *dev,
4397 netdev_features_t features);
Florian Westphalc1e756b2014-05-05 15:00:44 +02004398netdev_features_t netif_skb_features(struct sk_buff *skb);

static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;

	/* check flags correspondence */
	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP4  != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_IPXIP6  != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));

	return (features & feature) == feature;
}

static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}

static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
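/*
 * Example: the decision the transmit path makes with these helpers --
 * if the device cannot handle this GSO skb as-is, segment it in
 * software before handing it down.  Illustrative sketch of the core
 * logic only; error handling is omitted.
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs;
 *
 *		segs = skb_gso_segment(skb, features);
 *		... transmit each segment in turn ...
 *	}
 */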

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}
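/*
 * Example: a tunnel GSO handler restoring the skb it has already
 * modified once segmentation of the inner packet fails.  Illustrative
 * sketch only; "tnl_hlen", "mac_offset" and "mac_len" are values such
 * a handler would have saved before pulling the tunnel header.
 *
 *	segs = skb_mac_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		skb_gso_error_unwind(skb, protocol, tnl_hlen,
 *				     mac_offset, mac_len);
 */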

static inline bool netif_is_macsec(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACSEC;
}

static inline bool netif_is_macvlan(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;
}

static inline bool netif_is_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN_PORT;
}

static inline bool netif_is_bond_master(const struct net_device *dev)
{
	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_is_bond_slave(const struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

static inline bool netif_supports_nofcs(struct net_device *dev)
{
	return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_MASTER;
}

static inline bool netif_is_l3_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_L3MDEV_SLAVE;
}

static inline bool netif_is_bridge_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_EBRIDGE;
}

static inline bool netif_is_bridge_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_BRIDGE_PORT;
}

static inline bool netif_is_ovs_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OPENVSWITCH;
}

static inline bool netif_is_ovs_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_OVS_DATAPATH;
}

static inline bool netif_is_team_master(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM;
}

static inline bool netif_is_team_port(const struct net_device *dev)
{
	return dev->priv_flags & IFF_TEAM_PORT;
}

static inline bool netif_is_lag_master(const struct net_device *dev)
{
	return netif_is_bond_master(dev) || netif_is_team_master(dev);
}

static inline bool netif_is_lag_port(const struct net_device *dev)
{
	return netif_is_bond_slave(dev) || netif_is_team_port(dev);
}
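/*
 * Example: a netdevice notifier using the predicates above to react
 * only to LAG-related events.  Illustrative sketch only;
 * "handle_lag_join" is hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_CHANGEUPPER && netif_is_lag_port(dev))
 *			handle_lag_join(dev);
 *		return NOTIFY_DONE;
 *	}
 */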

static inline bool netif_is_rxfh_configured(const struct net_device *dev)
{
	return dev->priv_flags & IFF_RXFH_CONFIGURED;
}

static inline bool netif_is_failover(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER;
}

static inline bool netif_is_failover_slave(const struct net_device *dev)
{
	return dev->priv_flags & IFF_FAILOVER_SLAVE;
}

/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
}
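/*
 * Example: a tunnel driver that reads skb_dst() in its ndo_start_xmit()
 * calls this from its setup routine so the dst is not released before
 * transmit.  Illustrative sketch only; "my_tunnel_setup" is
 * hypothetical.
 *
 *	static void my_tunnel_setup(struct net_device *dev)
 *	{
 *		netif_keep_dst(dev);
 *	}
 */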

/* return true if dev can't cope with MTU-sized frames that need VLAN tag insertion */
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
	/* TODO: reserve and use an additional IFF bit, if we get more users */
	return dev->priv_flags & IFF_MACSEC;
}

extern struct pernet_operations __net_initdata loopback_net_ops;

/* Logging, debugging and troubleshooting/diagnostic helpers. */

/* netdev_printk helpers, similar to dev_printk */

static inline const char *netdev_name(const struct net_device *dev)
{
	if (!dev->name[0] || strchr(dev->name, '%'))
		return "(unnamed net_device)";
	return dev->name;
}

static inline bool netdev_unregistering(const struct net_device *dev)
{
	return dev->reg_state == NETREG_UNREGISTERING;
}

static inline const char *netdev_reg_state(const struct net_device *dev)
{
	switch (dev->reg_state) {
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
}

__printf(3, 4)
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...);
__printf(2, 3)
void netdev_emerg(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_alert(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_crit(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_err(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_warn(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_notice(const struct net_device *dev, const char *format, ...);
__printf(2, 3)
void netdev_info(const struct net_device *dev, const char *format, ...);

#define netdev_level_once(level, dev, fmt, ...)			\
do {								\
	static bool __print_once __read_mostly;			\
								\
	if (!__print_once) {					\
		__print_once = true;				\
		netdev_printk(level, dev, fmt, ##__VA_ARGS__);	\
	}							\
} while (0)

#define netdev_emerg_once(dev, fmt, ...) \
	netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
#define netdev_alert_once(dev, fmt, ...) \
	netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
#define netdev_crit_once(dev, fmt, ...) \
	netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
#define netdev_err_once(dev, fmt, ...) \
	netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define netdev_warn_once(dev, fmt, ...) \
	netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define netdev_notice_once(dev, fmt, ...) \
	netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
#define netdev_info_once(dev, fmt, ...) \
	netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
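/*
 * Example: limiting a recurring warning to a single line per boot so a
 * hot path cannot flood the log.  Illustrative; the condition is
 * hypothetical.
 *
 *	if (unlikely(ring_full))
 *		netdev_warn_once(dev, "TX ring full, throttling\n");
 */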

#define MODULE_ALIAS_NETDEV(device) \
	MODULE_ALIAS("netdev-" device)
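/*
 * Example: a module providing a network device can declare an alias so
 * it is loaded on demand when that interface name is requested.  The
 * device name below is illustrative.
 *
 *	MODULE_ALIAS_NETDEV("foo0");
 */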

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_netdev_dbg(__dev, format, ##args);		\
} while (0)
#elif defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else

#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif

/*
 * netdev_WARN() acts like netdev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)
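/*
 * Example: flagging a driver bug loudly, with a backtrace, while still
 * identifying the device.  Illustrative; the ring-state condition is
 * hypothetical.
 *
 *	if (unlikely(ring->head > ring->size))
 *		netdev_WARN(dev, "corrupt ring state, head %u\n",
 *			    ring->head);
 */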

/* netif printk helpers, similar to netdev_printk */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
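/*
 * Example: these helpers gate output on the driver's msg_enable mask,
 * so message verbosity can be tuned per category at runtime.
 * Illustrative sketch; "priv" is a hypothetical driver-private struct
 * with a msg_enable field, initialized here via netif_msg_init().
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	netif_info(priv, link, dev, "link up, %u Mbps\n", speed);
 */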

#if defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

/* if @cond then downgrade to debug, else print at @level */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)	\
	do {								\
		if (cond)						\
			netif_dbg(priv, type, netdev, fmt, ##args);	\
		else							\
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)
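/*
 * Example: demoting an expected failure to debug level while keeping
 * the unexpected case at error level.  Illustrative; "send_fw_command"
 * and the -EPERM case are hypothetical.
 *
 *	rc = send_fw_command(priv, cmd);
 *	if (rc)
 *		netif_cond_dbg(priv, drv, dev, rc == -EPERM, err,
 *			       "command failed, rc=%d\n", rc);
 */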

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16? Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *		0800	IP
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
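/*
 * Example: the hash bucket for a packet type is the low nibble of the
 * host-order EtherType.  Sketch of the indexing scheme only; the
 * actual table lives in net/core/dev.c.
 *
 *	unsigned int hash = ntohs(type) & PTYPE_HASH_MASK;
 *
 * e.g. RARP (0x8035), SNAP (0x0005) and X.25 (0x0805) all hash to
 * bucket 5, which is the overlap the comment above refers to.
 */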

#endif	/* _LINUX_NETDEVICE_H */