blob: 88292953aa6fdf1d6713da2c825bf08978051bd8 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Definitions for the Interfaces handler.
8 *
9 * Version: @(#)dev.h 1.0.10 08/12/93
10 *
Jesper Juhl02c30a82005-05-05 16:16:16 -070011 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -070012 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
Alan Cox113aa832008-10-13 19:01:08 -070015 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 * Bjorn Ekwall. <bj0rn@blox.se>
17 * Pekka Riikonen <priikone@poseidon.pspt.fi>
18 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 * Moved to /usr/include/linux for NET3
20 */
21#ifndef _LINUX_NETDEVICE_H
22#define _LINUX_NETDEVICE_H
23
Al Virod7fe0f22006-12-03 23:15:30 -050024#include <linux/timer.h>
Paul Gortmaker187f1882011-11-23 20:12:59 -050025#include <linux/bug.h>
Stephen Hemmingerbea33482007-10-03 16:41:36 -070026#include <linux/delay.h>
Arun Sharma600634972011-07-26 16:09:06 -070027#include <linux/atomic.h>
Eric Dumazet53511452014-10-08 08:19:27 -070028#include <linux/prefetch.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <asm/cache.h>
30#include <asm/byteorder.h>
31
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/percpu.h>
David S. Miller4d5b78c2009-05-06 16:52:51 -070033#include <linux/rculist.h>
Stephen Hemmingerbea33482007-10-03 16:41:36 -070034#include <linux/workqueue.h>
Tom Herbert114cf582011-11-28 16:33:09 +000035#include <linux/dynamic_queue_limits.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
Patrick McHardyb1b67dd2009-04-20 04:49:28 +000037#include <linux/ethtool.h>
Daniel Lezcanoa050c332007-09-12 14:57:09 +020038#include <net/net_namespace.h>
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -080039#ifdef CONFIG_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -080040#include <net/dcbnl.h>
41#endif
Neil Horman5bc14212011-11-22 05:10:51 +000042#include <net/netprio_cgroup.h>
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +010043#include <net/xdp.h>
Daniel Lezcanoa050c332007-09-12 14:57:09 +020044
Michał Mirosława59e2ec2011-11-15 15:29:55 +000045#include <linux/netdev_features.h>
John Fastabend77162022012-04-15 06:43:56 +000046#include <linux/neighbour.h>
David Howells607ca462012-10-13 10:46:48 +010047#include <uapi/linux/netdevice.h>
Moni Shoua61bd3852015-02-03 16:48:29 +020048#include <uapi/linux/if_bonding.h>
John Fastabende4c67342016-02-16 21:16:15 -080049#include <uapi/linux/pkt_cls.h>
Jiri Kosina59cc1f62016-08-10 11:05:15 +020050#include <linux/hashtable.h>
Michał Mirosława59e2ec2011-11-15 15:29:55 +000051
Jeff Moyer115c1d62005-06-22 22:05:31 -070052struct netpoll_info;
Paul Gortmaker313162d2012-01-30 11:46:54 -050053struct device;
Richard Cochranc1f19b52010-07-17 08:49:36 +000054struct phy_device;
Vivien Didelot2f657a62017-09-29 17:19:20 -040055struct dsa_port;
Andrew Lunnc6e970a2017-03-28 23:45:06 +020056
Russell Kinge679c9c2018-03-28 15:44:16 -070057struct sfp_bus;
Johannes Berg704232c2007-04-23 12:20:05 -070058/* 802.11 specific */
59struct wireless_dev;
Alexander Aring98a18b62014-11-02 06:44:54 +010060/* 802.15.4 specific */
61struct wpan_dev;
Robert Shearman03c57742015-04-22 11:14:37 +010062struct mpls_dev;
Alexander Duyck7c46a642016-06-16 12:21:00 -070063/* UDP Tunnel offloads */
64struct udp_tunnel_info;
Brenden Blancoa7862b42016-07-19 12:16:48 -070065struct bpf_prog;
John Fastabend814abfa2017-07-17 09:27:07 -070066struct xdp_buff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
Joe Perchesf629d202013-09-26 14:48:15 -070068void netdev_set_default_ethtool_ops(struct net_device *dev,
69 const struct ethtool_ops *ops);
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +000070
Jarek Poplawski9a1654b2009-11-15 07:20:12 +000071/* Backlog congestion levels */
72#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
73#define NET_RX_DROP 1 /* packet dropped */
74
Patrick McHardy572a9d72009-11-10 06:14:14 +000075/*
76 * Transmit return codes: transmit return codes originate from three different
77 * namespaces:
78 *
79 * - qdisc return codes
80 * - driver transmit return codes
81 * - errno values
82 *
83 * Drivers are allowed to return any one of those in their hard_start_xmit()
84 * function. Real network devices commonly used with qdiscs should only return
85 * the driver transmit return codes though - when qdiscs are used, the actual
86 * transmission happens asynchronously, so the value is not propagated to
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -050087 * higher layers. Virtual network devices transmit synchronously; in this case
88 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
Patrick McHardy572a9d72009-11-10 06:14:14 +000089 * others are propagated to higher layers.
90 */
91
92/* qdisc ->enqueue() return codes. */
93#define NET_XMIT_SUCCESS 0x00
Jarek Poplawski9a1654b2009-11-15 07:20:12 +000094#define NET_XMIT_DROP 0x01 /* skb dropped */
95#define NET_XMIT_CN 0x02 /* congestion notification */
Jarek Poplawski9a1654b2009-11-15 07:20:12 +000096#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
Linus Torvalds1da177e2005-04-16 15:20:36 -070097
Gerrit Renkerb9df3cb2006-11-14 11:21:36 -020098/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
99 * indicates that the device will soon be dropping packets, or already drops
100 * some packets of the same priority; prompting us to send less aggressively. */
Patrick McHardy572a9d72009-11-10 06:14:14 +0000101#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
103
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +0000104/* Driver transmit return codes */
Jarek Poplawski9a1654b2009-11-15 07:20:12 +0000105#define NETDEV_TX_MASK 0xf0
Patrick McHardy572a9d72009-11-10 06:14:14 +0000106
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +0000107enum netdev_tx {
Patrick McHardy572a9d72009-11-10 06:14:14 +0000108 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
Jarek Poplawski9a1654b2009-11-15 07:20:12 +0000109 NETDEV_TX_OK = 0x00, /* driver took care of packet */
110 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +0000111};
112typedef enum netdev_tx netdev_tx_t;
113
Jarek Poplawski9a1654b2009-11-15 07:20:12 +0000114/*
115 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
116 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
117 */
118static inline bool dev_xmit_complete(int rc)
119{
120 /*
121 * Positive cases with an skb consumed by a driver:
122 * - successful transmission (rc == NETDEV_TX_OK)
123 * - error while transmitting (rc < 0)
124 * - error while queueing to a different device (rc & NET_XMIT_MASK)
125 */
126 if (likely(rc < NET_XMIT_MASK))
127 return true;
128
129 return false;
130}
131
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132/*
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500133 * Compute the worst-case header length according to the protocols
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134 * used.
135 */
Graf Yangfe2918b2009-02-05 21:26:19 -0800136
KY Srinivasanc0eb4542015-12-01 16:43:10 -0800137#if defined(CONFIG_HYPERV_NET)
138# define LL_MAX_HEADER 128
139#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
David S. Miller8388e3d2008-05-12 20:17:33 -0700140# if defined(CONFIG_MAC80211_MESH)
141# define LL_MAX_HEADER 128
142# else
143# define LL_MAX_HEADER 96
144# endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145#else
David S. Miller8388e3d2008-05-12 20:17:33 -0700146# define LL_MAX_HEADER 32
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147#endif
148
Ben Hutchingsd11ead72011-11-25 14:40:26 +0000149#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
150 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151#define MAX_HEADER LL_MAX_HEADER
152#else
153#define MAX_HEADER (LL_MAX_HEADER + 48)
154#endif
155
156/*
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +0000157 * Old network device statistics. Fields are native words
158 * (unsigned long) so they can be read and written atomically.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159 */
Graf Yangfe2918b2009-02-05 21:26:19 -0800160
Eric Dumazetd94d9fe2009-11-04 09:50:58 -0800161struct net_device_stats {
Ben Hutchings3cfde792010-07-09 09:11:52 +0000162 unsigned long rx_packets;
163 unsigned long tx_packets;
164 unsigned long rx_bytes;
165 unsigned long tx_bytes;
166 unsigned long rx_errors;
167 unsigned long tx_errors;
168 unsigned long rx_dropped;
169 unsigned long tx_dropped;
170 unsigned long multicast;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171 unsigned long collisions;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700172 unsigned long rx_length_errors;
Ben Hutchings3cfde792010-07-09 09:11:52 +0000173 unsigned long rx_over_errors;
174 unsigned long rx_crc_errors;
175 unsigned long rx_frame_errors;
176 unsigned long rx_fifo_errors;
177 unsigned long rx_missed_errors;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178 unsigned long tx_aborted_errors;
179 unsigned long tx_carrier_errors;
180 unsigned long tx_fifo_errors;
181 unsigned long tx_heartbeat_errors;
182 unsigned long tx_window_errors;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183 unsigned long rx_compressed;
184 unsigned long tx_compressed;
185};
186
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187
188#include <linux/cache.h>
189#include <linux/skbuff.h>
190
Eric Dumazetadc93002011-11-17 03:13:26 +0000191#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +0100192#include <linux/static_key.h>
Eric Dumazetdc053602019-03-22 08:56:38 -0700193extern struct static_key_false rps_needed;
194extern struct static_key_false rfs_needed;
Eric Dumazetadc93002011-11-17 03:13:26 +0000195#endif
196
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197struct neighbour;
198struct neigh_parms;
199struct sk_buff;
200
Jiri Pirkof001fde2009-05-05 02:48:28 +0000201struct netdev_hw_addr {
202 struct list_head list;
203 unsigned char addr[MAX_ADDR_LEN];
204 unsigned char type;
Jiri Pirkoccffad252009-05-22 23:22:17 +0000205#define NETDEV_HW_ADDR_T_LAN 1
206#define NETDEV_HW_ADDR_T_SAN 2
207#define NETDEV_HW_ADDR_T_SLAVE 3
208#define NETDEV_HW_ADDR_T_UNICAST 4
Jiri Pirko22bedad32010-04-01 21:22:57 +0000209#define NETDEV_HW_ADDR_T_MULTICAST 5
Jiri Pirko22bedad32010-04-01 21:22:57 +0000210 bool global_use;
Vlad Yasevich4cd729b02013-04-15 09:54:25 +0000211 int sync_cnt;
Eric Dumazet8f8f1032010-09-19 11:24:02 -0700212 int refcount;
Vlad Yasevich4543fbe2013-04-02 17:10:07 -0400213 int synced;
Jiri Pirkof001fde2009-05-05 02:48:28 +0000214 struct rcu_head rcu_head;
215};
216
Jiri Pirko31278e72009-06-17 01:12:19 +0000217struct netdev_hw_addr_list {
218 struct list_head list;
219 int count;
220};
221
Jiri Pirko22bedad32010-04-01 21:22:57 +0000222#define netdev_hw_addr_list_count(l) ((l)->count)
223#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
224#define netdev_hw_addr_list_for_each(ha, l) \
225 list_for_each_entry(ha, &(l)->list, list)
226
227#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
228#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
Jiri Pirko32e7bfc2010-01-25 13:36:10 -0800229#define netdev_for_each_uc_addr(ha, dev) \
Jiri Pirko22bedad32010-04-01 21:22:57 +0000230 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
Jiri Pirko32e7bfc2010-01-25 13:36:10 -0800231
Jiri Pirko22bedad32010-04-01 21:22:57 +0000232#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
233#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
Pavel Roskin18e225f2010-04-07 16:40:09 -0700234#define netdev_for_each_mc_addr(ha, dev) \
Jiri Pirko22bedad32010-04-01 21:22:57 +0000235 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
Jiri Pirko6683ece32010-02-04 10:22:25 -0800236
Eric Dumazetd94d9fe2009-11-04 09:50:58 -0800237struct hh_cache {
Alexey Dobriyan5b3dc2f2017-04-10 11:11:17 +0300238 unsigned int hh_len;
Stephen Hemminger3644f0c2006-12-07 15:08:17 -0800239 seqlock_t hh_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240
241 /* cached hardware header; allow for machine alignment needs. */
242#define HH_DATA_MOD 16
243#define HH_DATA_OFF(__len) \
Jiri Benc5ba0eac2005-06-02 16:48:05 -0700244 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700245#define HH_DATA_ALIGN(__len) \
246 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
247 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
248};
249
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500250/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251 * Alternative is:
252 * dev->hard_header_len ? (dev->hard_header_len +
253 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
254 *
255 * We could use other alignment values, but we must maintain the
256 * relationship HH alignment <= LL alignment.
257 */
258#define LL_RESERVED_SPACE(dev) \
Johannes Bergf5184d22008-05-12 20:48:31 -0700259 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
Johannes Bergf5184d22008-05-12 20:48:31 -0700261 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700262
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -0700263struct header_ops {
264 int (*create) (struct sk_buff *skb, struct net_device *dev,
265 unsigned short type, const void *daddr,
Eric Dumazet95c96172012-04-15 05:58:06 +0000266 const void *saddr, unsigned int len);
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -0700267 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
David S. Millere69dd332011-07-12 23:28:12 -0700268 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -0700269 void (*cache_update)(struct hh_cache *hh,
270 const struct net_device *dev,
271 const unsigned char *haddr);
Willem de Bruijn2793a232016-03-09 21:58:32 -0500272 bool (*validate)(const char *ll_header, unsigned int len);
Maxim Mikityanskiye78b2912019-02-21 12:39:58 +0000273 __be16 (*parse_protocol)(const struct sk_buff *skb);
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -0700274};
275
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276/* These flag bits are private to the generic network queueing
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500277 * layer; they may not be explicitly referenced by any other
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278 * code.
279 */
280
Eric Dumazetd94d9fe2009-11-04 09:50:58 -0800281enum netdev_state_t {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 __LINK_STATE_START,
283 __LINK_STATE_PRESENT,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 __LINK_STATE_NOCARRIER,
Stefan Rompfb00055a2006-03-20 17:09:11 -0800285 __LINK_STATE_LINKWATCH_PENDING,
286 __LINK_STATE_DORMANT,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287};
288
289
290/*
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500291 * This structure holds boot-time configured netdevice settings. They
Graf Yangfe2918b2009-02-05 21:26:19 -0800292 * are then used in the device probing.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 */
294struct netdev_boot_setup {
295 char name[IFNAMSIZ];
296 struct ifmap map;
297};
298#define NETDEV_BOOT_SETUP_MAX 8
299
Joe Perchesf629d202013-09-26 14:48:15 -0700300int __init netdev_boot_setup(char *str);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301
Li RongQing6312fe72018-07-05 14:34:32 +0800302struct gro_list {
303 struct list_head list;
304 int count;
305};
306
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307/*
Li RongQingd9f37d02018-07-13 14:41:36 +0800308 * size of gro hash buckets, must less than bit number of
309 * napi_struct::gro_bitmask
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700310 */
David Miller07d78362018-06-24 14:14:02 +0900311#define GRO_HASH_BUCKETS 8
Li RongQingd9f37d02018-07-13 14:41:36 +0800312
313/*
314 * Structure for NAPI scheduling similar to tasklet but with weighting
315 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700316struct napi_struct {
317 /* The poll_list must only be managed by the entity which
318 * changes the state of the NAPI_STATE_SCHED bit. This means
319 * whoever atomically sets that bit can add this napi_struct
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500320 * to the per-CPU poll_list, and whoever clears that bit
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700321 * can remove from the list right before clearing the bit.
322 */
323 struct list_head poll_list;
324
325 unsigned long state;
326 int weight;
Li RongQingd9f37d02018-07-13 14:41:36 +0800327 unsigned long gro_bitmask;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700328 int (*poll)(struct napi_struct *, int);
329#ifdef CONFIG_NETPOLL
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700330 int poll_owner;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700331#endif
Herbert Xu5d38a072009-01-04 16:13:40 -0800332 struct net_device *dev;
Li RongQing6312fe72018-07-05 14:34:32 +0800333 struct gro_list gro_hash[GRO_HASH_BUCKETS];
Herbert Xu5d38a072009-01-04 16:13:40 -0800334 struct sk_buff *skb;
Eric Dumazet3b47d302014-11-06 21:09:44 -0800335 struct hrtimer timer;
Eric Dumazet404f7c92012-09-26 07:07:47 +0000336 struct list_head dev_list;
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300337 struct hlist_node napi_hash_node;
338 unsigned int napi_id;
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700339};
340
Eric Dumazetd94d9fe2009-11-04 09:50:58 -0800341enum {
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700342 NAPI_STATE_SCHED, /* Poll is scheduled */
Eric Dumazet39e6c822017-02-28 10:34:50 -0800343 NAPI_STATE_MISSED, /* reschedule a napi */
David S. Millera0a46192008-01-07 20:35:07 -0800344 NAPI_STATE_DISABLE, /* Disable pending */
Neil Horman7b363e42008-12-09 23:22:26 -0800345 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
Eric Dumazetd64b5e82015-11-18 06:31:00 -0800346 NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
347 NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
Eric Dumazet217f6972016-11-15 10:15:11 -0800348 NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
349};
350
351enum {
Eric Dumazet39e6c822017-02-28 10:34:50 -0800352 NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
353 NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
354 NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
355 NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
356 NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
357 NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
358 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700359};
360
Ben Hutchings5b252f02009-10-29 07:17:09 +0000361enum gro_result {
Herbert Xud1c76af2009-03-16 10:50:02 -0700362 GRO_MERGED,
363 GRO_MERGED_FREE,
364 GRO_HELD,
365 GRO_NORMAL,
366 GRO_DROP,
Steffen Klassert25393d32017-02-15 09:39:44 +0100367 GRO_CONSUMED,
Herbert Xud1c76af2009-03-16 10:50:02 -0700368};
Ben Hutchings5b252f02009-10-29 07:17:09 +0000369typedef enum gro_result gro_result_t;
Herbert Xud1c76af2009-03-16 10:50:02 -0700370
Jiri Pirko8a4eb572011-03-12 03:14:39 +0000371/*
372 * enum rx_handler_result - Possible return values for rx_handlers.
373 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
374 * further.
375 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
376 * case skb->dev was changed by rx_handler.
377 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500378 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
Jiri Pirko8a4eb572011-03-12 03:14:39 +0000379 *
380 * rx_handlers are functions called from inside __netif_receive_skb(), to do
381 * special processing of the skb, prior to delivery to protocol handlers.
382 *
383 * Currently, a net_device can only have a single rx_handler registered. Trying
384 * to register a second rx_handler will return -EBUSY.
385 *
386 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
387 * To unregister a rx_handler on a net_device, use
388 * netdev_rx_handler_unregister().
389 *
390 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
391 * do with the skb.
392 *
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500393 * If the rx_handler consumed the skb in some way, it should return
Jiri Pirko8a4eb572011-03-12 03:14:39 +0000394 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500395 * the skb to be delivered in some other way.
Jiri Pirko8a4eb572011-03-12 03:14:39 +0000396 *
397 * If the rx_handler changed skb->dev, to divert the skb to another
398 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
399 * new device will be called if it exists.
400 *
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500401 * If the rx_handler decides the skb should be ignored, it should return
Jiri Pirko8a4eb572011-03-12 03:14:39 +0000402 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
Adam Buchbinderd93cf062012-09-19 21:47:58 -0400403 * are registered on exact device (ptype->dev == skb->dev).
Jiri Pirko8a4eb572011-03-12 03:14:39 +0000404 *
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500405 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
Jiri Pirko8a4eb572011-03-12 03:14:39 +0000406 * delivered, it should return RX_HANDLER_PASS.
407 *
408 * A device without a registered rx_handler will behave as if rx_handler
409 * returned RX_HANDLER_PASS.
410 */
411
412enum rx_handler_result {
413 RX_HANDLER_CONSUMED,
414 RX_HANDLER_ANOTHER,
415 RX_HANDLER_EXACT,
416 RX_HANDLER_PASS,
417};
418typedef enum rx_handler_result rx_handler_result_t;
419typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +0000420
Joe Perchesf629d202013-09-26 14:48:15 -0700421void __napi_schedule(struct napi_struct *n);
Eric Dumazetbc9ad162014-10-28 18:05:13 -0700422void __napi_schedule_irqoff(struct napi_struct *n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700423
David S. Miller4d295152012-03-07 21:02:35 -0500424static inline bool napi_disable_pending(struct napi_struct *n)
David S. Millera0a46192008-01-07 20:35:07 -0800425{
426 return test_bit(NAPI_STATE_DISABLE, &n->state);
427}
428
Eric Dumazet39e6c822017-02-28 10:34:50 -0800429bool napi_schedule_prep(struct napi_struct *n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700430
431/**
432 * napi_schedule - schedule NAPI poll
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500433 * @n: NAPI context
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700434 *
435 * Schedule NAPI poll routine to be called if it is not already
436 * running.
437 */
438static inline void napi_schedule(struct napi_struct *n)
439{
440 if (napi_schedule_prep(n))
441 __napi_schedule(n);
442}
443
Eric Dumazetbc9ad162014-10-28 18:05:13 -0700444/**
445 * napi_schedule_irqoff - schedule NAPI poll
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500446 * @n: NAPI context
Eric Dumazetbc9ad162014-10-28 18:05:13 -0700447 *
448 * Variant of napi_schedule(), assuming hard irqs are masked.
449 */
450static inline void napi_schedule_irqoff(struct napi_struct *n)
451{
452 if (napi_schedule_prep(n))
453 __napi_schedule_irqoff(n);
454}
455
Roland Dreierbfe13f52007-10-09 15:47:37 -0700456/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
David S. Miller4d295152012-03-07 21:02:35 -0500457static inline bool napi_reschedule(struct napi_struct *napi)
Roland Dreierbfe13f52007-10-09 15:47:37 -0700458{
459 if (napi_schedule_prep(napi)) {
460 __napi_schedule(napi);
David S. Miller4d295152012-03-07 21:02:35 -0500461 return true;
Roland Dreierbfe13f52007-10-09 15:47:37 -0700462 }
David S. Miller4d295152012-03-07 21:02:35 -0500463 return false;
Roland Dreierbfe13f52007-10-09 15:47:37 -0700464}
465
Eric Dumazet364b6052016-11-15 10:15:13 -0800466bool napi_complete_done(struct napi_struct *n, int work_done);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700467/**
468 * napi_complete - NAPI processing complete
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500469 * @n: NAPI context
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700470 *
471 * Mark NAPI processing as complete.
Eric Dumazet3b47d302014-11-06 21:09:44 -0800472 * Consider using napi_complete_done() instead.
Eric Dumazet364b6052016-11-15 10:15:13 -0800473 * Return false if device should avoid rearming interrupts.
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700474 */
Eric Dumazet364b6052016-11-15 10:15:13 -0800475static inline bool napi_complete(struct napi_struct *n)
Eric Dumazet3b47d302014-11-06 21:09:44 -0800476{
477 return napi_complete_done(n, 0);
478}
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700479
480/**
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300481 * napi_hash_del - remove a NAPI from global table
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500482 * @napi: NAPI context
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300483 *
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500484 * Warning: caller must observe RCU grace period
Eric Dumazet34cbe272015-11-18 06:31:02 -0800485 * before freeing memory containing @napi, if
486 * this function returns true.
Eric Dumazet93d05d42015-11-18 06:31:03 -0800487 * Note: core networking stack automatically calls it
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500488 * from netif_napi_del().
Eric Dumazet93d05d42015-11-18 06:31:03 -0800489 * Drivers might want to call this helper to combine all
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500490 * the needed RCU grace periods into a single one.
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300491 */
Eric Dumazet34cbe272015-11-18 06:31:02 -0800492bool napi_hash_del(struct napi_struct *napi);
Eliezer Tamiraf12fa62013-06-10 11:39:41 +0300493
494/**
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700495 * napi_disable - prevent NAPI from scheduling
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500496 * @n: NAPI context
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700497 *
498 * Stop NAPI from being scheduled on this context.
499 * Waits till any outstanding processing completes.
500 */
Eric Dumazet3b47d302014-11-06 21:09:44 -0800501void napi_disable(struct napi_struct *n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700502
503/**
504 * napi_enable - enable NAPI scheduling
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500505 * @n: NAPI context
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700506 *
507 * Resume NAPI from being scheduled on this context.
508 * Must be paired with napi_disable.
509 */
510static inline void napi_enable(struct napi_struct *n)
511{
512 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
Peter Zijlstra4e857c52014-03-17 18:06:10 +0100513 smp_mb__before_atomic();
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700514 clear_bit(NAPI_STATE_SCHED, &n->state);
Neil Horman2d8bff1262015-09-23 14:57:58 -0400515 clear_bit(NAPI_STATE_NPSVC, &n->state);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700516}
517
Stephen Hemmingerc264c3d2007-10-17 13:26:41 -0700518/**
519 * napi_synchronize - wait until NAPI is not running
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500520 * @n: NAPI context
Stephen Hemmingerc264c3d2007-10-17 13:26:41 -0700521 *
522 * Wait until NAPI is done being scheduled on this context.
523 * Waits till any outstanding processing completes but
524 * does not disable future activations.
525 */
526static inline void napi_synchronize(const struct napi_struct *n)
527{
Arnd Bergmannfacc4322016-01-22 11:43:44 +0100528 if (IS_ENABLED(CONFIG_SMP))
529 while (test_bit(NAPI_STATE_SCHED, &n->state))
530 msleep(1);
531 else
532 barrier();
Stephen Hemmingerc264c3d2007-10-17 13:26:41 -0700533}
Stephen Hemmingerc264c3d2007-10-17 13:26:41 -0700534
Magnus Karlsson6c5c958102018-08-28 14:44:28 +0200535/**
536 * napi_if_scheduled_mark_missed - if napi is running, set the
537 * NAPIF_STATE_MISSED
538 * @n: NAPI context
539 *
540 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
541 * NAPI is scheduled.
542 **/
543static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
544{
545 unsigned long val, new;
546
547 do {
548 val = READ_ONCE(n->state);
549 if (val & NAPIF_STATE_DISABLE)
550 return true;
551
552 if (!(val & NAPIF_STATE_SCHED))
553 return false;
554
555 new = val | NAPIF_STATE_MISSED;
556 } while (cmpxchg(&n->state, val, new) != val);
557
558 return true;
559}
560
Eric Dumazetd94d9fe2009-11-04 09:50:58 -0800561enum netdev_queue_state_t {
Tom Herbert734664982011-11-28 16:32:44 +0000562 __QUEUE_STATE_DRV_XOFF,
563 __QUEUE_STATE_STACK_XOFF,
David S. Millerc3f26a22008-07-31 16:58:50 -0700564 __QUEUE_STATE_FROZEN,
David S. Miller79d16382008-07-08 23:14:46 -0700565};
Daniel Borkmann8e2f1a62014-04-02 20:52:57 +0200566
567#define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
568#define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
569#define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
570
571#define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
572#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
573 QUEUE_STATE_FROZEN)
574#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
575 QUEUE_STATE_FROZEN)
576
Tom Herbert734664982011-11-28 16:32:44 +0000577/*
578 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
579 * netif_tx_* functions below are used to manipulate this flag. The
580 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
581 * queue independently. The netif_xmit_*stopped functions below are called
582 * to check if the queue has been stopped by the driver or stack (either
583 * of the XOFF bits are set in the state). Drivers should not need to call
584 * netif_xmit*stopped functions, they should only be using netif_tx_*.
585 */
David S. Miller79d16382008-07-08 23:14:46 -0700586
David S. Millerbb949fb2008-07-08 16:55:56 -0700587struct netdev_queue {
Eric Dumazet6a321cb2009-04-28 04:43:42 -0700588/*
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500589 * read-mostly part
Eric Dumazet6a321cb2009-04-28 04:43:42 -0700590 */
David S. Millerbb949fb2008-07-08 16:55:56 -0700591 struct net_device *dev;
John Fastabend46e5da40a2014-09-12 20:04:52 -0700592 struct Qdisc __rcu *qdisc;
David S. Millerb0e1e642008-07-08 17:42:10 -0700593 struct Qdisc *qdisc_sleeping;
david decotignyccf5ff62011-11-16 12:15:10 +0000594#ifdef CONFIG_SYSFS
Tom Herbert1d24eb42010-11-21 13:17:27 +0000595 struct kobject kobj;
596#endif
Eric Dumazetf2cd2d32010-11-29 08:14:37 +0000597#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
598 int numa_node;
599#endif
Florian Westphalc0ef0792016-05-03 03:29:09 +0200600 unsigned long tx_maxrate;
601 /*
602 * Number of TX timeouts for this queue
603 * (/sys/class/net/DEV/Q/trans_timeout)
604 */
605 unsigned long trans_timeout;
Alexander Duyckffcfe252018-07-09 12:19:38 -0400606
607 /* Subordinate device that the queue has been assigned to */
608 struct net_device *sb_dev;
Magnus Karlsson661b8d1b2018-10-01 14:51:33 +0200609#ifdef CONFIG_XDP_SOCKETS
610 struct xdp_umem *umem;
611#endif
Eric Dumazet6a321cb2009-04-28 04:43:42 -0700612/*
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500613 * write-mostly part
Eric Dumazet6a321cb2009-04-28 04:43:42 -0700614 */
615 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
616 int xmit_lock_owner;
Eric Dumazet9d214932009-05-17 20:55:16 -0700617 /*
Florian Westphal9b366272016-05-03 16:33:14 +0200618 * Time (in jiffies) of last Tx
Eric Dumazet9d214932009-05-17 20:55:16 -0700619 */
620 unsigned long trans_start;
david decotignyccf5ff62011-11-16 12:15:10 +0000621
Tom Herbert114cf582011-11-28 16:33:09 +0000622 unsigned long state;
623
624#ifdef CONFIG_BQL
625 struct dql dql;
626#endif
David S. Millere8a04642008-07-17 00:34:19 -0700627} ____cacheline_aligned_in_smp;
David S. Millerbb949fb2008-07-08 16:55:56 -0700628
Eric Dumazet79134e62018-03-08 12:51:41 -0800629extern int sysctl_fb_tunnels_only_for_init_net;
Cong Wang856c3952019-01-17 23:27:11 -0800630extern int sysctl_devconf_inherit_init_net;
Eric Dumazet79134e62018-03-08 12:51:41 -0800631
632static inline bool net_has_fallback_tunnels(const struct net *net)
633{
Arnd Bergmannbe9fc092018-03-13 12:44:53 +0100634 return net == &init_net ||
635 !IS_ENABLED(CONFIG_SYSCTL) ||
636 !sysctl_fb_tunnels_only_for_init_net;
Eric Dumazet79134e62018-03-08 12:51:41 -0800637}
638
Eric Dumazetf2cd2d32010-11-29 08:14:37 +0000639static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
640{
641#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
642 return q->numa_node;
643#else
Changli Gaob236da62010-12-14 03:09:15 +0000644 return NUMA_NO_NODE;
Eric Dumazetf2cd2d32010-11-29 08:14:37 +0000645#endif
646}
647
648static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
649{
650#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
651 q->numa_node = node;
652#endif
653}
654
Eric Dumazetdf334542010-03-24 19:13:54 +0000655#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +0000656/*
657 * This structure holds an RPS map which can be of variable length. The
658 * map is an array of CPUs.
659 */
660struct rps_map {
661 unsigned int len;
662 struct rcu_head rcu;
663 u16 cpus[0];
664};
Eric Dumazet60b778c2011-12-24 06:56:49 +0000665#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
Tom Herbert0a9627f2010-03-16 08:03:29 +0000666
Tom Herbertfec5e652010-04-16 16:01:27 -0700667/*
Ben Hutchingsc4454772011-01-19 11:03:53 +0000668 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
669 * tail pointer for that CPU's input queue at the time of last enqueue, and
670 * a hardware filter index.
Tom Herbertfec5e652010-04-16 16:01:27 -0700671 */
672struct rps_dev_flow {
673 u16 cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +0000674 u16 filter;
Tom Herbertfec5e652010-04-16 16:01:27 -0700675 unsigned int last_qtail;
676};
Ben Hutchingsc4454772011-01-19 11:03:53 +0000677#define RPS_NO_FILTER 0xffff
Tom Herbertfec5e652010-04-16 16:01:27 -0700678
679/*
680 * The rps_dev_flow_table structure contains a table of flow mappings.
681 */
682struct rps_dev_flow_table {
683 unsigned int mask;
684 struct rcu_head rcu;
Tom Herbertfec5e652010-04-16 16:01:27 -0700685 struct rps_dev_flow flows[0];
686};
687#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
Eric Dumazet60b778c2011-12-24 06:56:49 +0000688 ((_num) * sizeof(struct rps_dev_flow)))
Tom Herbertfec5e652010-04-16 16:01:27 -0700689
690/*
691 * The rps_sock_flow_table contains mappings of flows to the last CPU
692 * on which they were processed by the application (set in recvmsg).
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500693 * Each entry is a 32bit value. Upper part is the high-order bits
694 * of flow hash, lower part is CPU number.
Eric Dumazet567e4b72015-02-06 12:59:01 -0800695 * rps_cpu_mask is used to partition the space, depending on number of
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500696 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
697 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
Eric Dumazet567e4b72015-02-06 12:59:01 -0800698 * meaning we use 32-6=26 bits for the hash.
Tom Herbertfec5e652010-04-16 16:01:27 -0700699 */
700struct rps_sock_flow_table {
Eric Dumazet567e4b72015-02-06 12:59:01 -0800701 u32 mask;
Eric Dumazet93c1af62015-02-08 20:39:13 -0800702
703 u32 ents[0] ____cacheline_aligned_in_smp;
Tom Herbertfec5e652010-04-16 16:01:27 -0700704};
Eric Dumazet567e4b72015-02-06 12:59:01 -0800705#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
Tom Herbertfec5e652010-04-16 16:01:27 -0700706
707#define RPS_NO_CPU 0xffff
708
Eric Dumazet567e4b72015-02-06 12:59:01 -0800709extern u32 rps_cpu_mask;
710extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
711
Tom Herbertfec5e652010-04-16 16:01:27 -0700712static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
713 u32 hash)
714{
715 if (table && hash) {
Eric Dumazet567e4b72015-02-06 12:59:01 -0800716 unsigned int index = hash & table->mask;
717 u32 val = hash & ~rps_cpu_mask;
Tom Herbertfec5e652010-04-16 16:01:27 -0700718
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500719 /* We only give a hint, preemption can change CPU under us */
Eric Dumazet567e4b72015-02-06 12:59:01 -0800720 val |= raw_smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -0700721
Eric Dumazet567e4b72015-02-06 12:59:01 -0800722 if (table->ents[index] != val)
723 table->ents[index] = val;
Tom Herbertfec5e652010-04-16 16:01:27 -0700724 }
725}
726
Ben Hutchingsc4454772011-01-19 11:03:53 +0000727#ifdef CONFIG_RFS_ACCEL
Joe Perchesf629d202013-09-26 14:48:15 -0700728bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
729 u16 filter_id);
Ben Hutchingsc4454772011-01-19 11:03:53 +0000730#endif
Michael Daltona953be52014-01-16 22:23:28 -0800731#endif /* CONFIG_RPS */
Ben Hutchingsc4454772011-01-19 11:03:53 +0000732
Tom Herbert0a9627f2010-03-16 08:03:29 +0000733/* This structure contains an instance of an RX queue. */
734struct netdev_rx_queue {
Michael Daltona953be52014-01-16 22:23:28 -0800735#ifdef CONFIG_RPS
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000736 struct rps_map __rcu *rps_map;
737 struct rps_dev_flow_table __rcu *rps_flow_table;
Michael Daltona953be52014-01-16 22:23:28 -0800738#endif
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +0000739 struct kobject kobj;
Tom Herbertfe822242010-11-09 10:47:38 +0000740 struct net_device *dev;
Jesper Dangaard Brouere817f852018-01-03 11:26:09 +0100741 struct xdp_rxq_info xdp_rxq;
Magnus Karlsson661b8d1b2018-10-01 14:51:33 +0200742#ifdef CONFIG_XDP_SOCKETS
743 struct xdp_umem *umem;
744#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +0000745} ____cacheline_aligned_in_smp;
Michael Daltona953be52014-01-16 22:23:28 -0800746
747/*
748 * RX queue sysfs structures and functions.
749 */
750struct rx_queue_attribute {
751 struct attribute attr;
stephen hemminger718ad682017-08-18 13:46:24 -0700752 ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
Michael Daltona953be52014-01-16 22:23:28 -0800753 ssize_t (*store)(struct netdev_rx_queue *queue,
stephen hemminger718ad682017-08-18 13:46:24 -0700754 const char *buf, size_t len);
Michael Daltona953be52014-01-16 22:23:28 -0800755};
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800756
Tom Herbertbf264142010-11-26 08:36:09 +0000757#ifdef CONFIG_XPS
758/*
759 * This structure holds an XPS map which can be of variable length. The
760 * map is an array of queues.
761 */
762struct xps_map {
763 unsigned int len;
764 unsigned int alloc_len;
765 struct rcu_head rcu;
766 u16 queues[0];
767};
Eric Dumazet60b778c2011-12-24 06:56:49 +0000768#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
Helge Dellerc59f4192015-10-25 10:00:32 +0100769#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
770 - sizeof(struct xps_map)) / sizeof(u16))
Tom Herbertbf264142010-11-26 08:36:09 +0000771
772/*
773 * This structure holds all XPS maps for device. Maps are indexed by CPU.
774 */
775struct xps_dev_maps {
776 struct rcu_head rcu;
Amritha Nambiar80d19662018-06-29 21:26:41 -0700777 struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
Tom Herbertbf264142010-11-26 08:36:09 +0000778};
Amritha Nambiar80d19662018-06-29 21:26:41 -0700779
780#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
Alexander Duyck184c4492016-10-28 11:50:13 -0400781 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
Amritha Nambiar80d19662018-06-29 21:26:41 -0700782
783#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
784 (_rxqs * (_tcs) * sizeof(struct xps_map *)))
785
Tom Herbertbf264142010-11-26 08:36:09 +0000786#endif /* CONFIG_XPS */
787
John Fastabend4f57c082011-01-17 08:06:04 +0000788#define TC_MAX_QUEUE 16
789#define TC_BITMASK 15
790/* HW offloaded queuing disciplines txq count and offset maps */
791struct netdev_tc_txq {
792 u16 count;
793 u16 offset;
794};
795
Neerav Parikh68bad942012-01-04 20:23:39 +0000796#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
797/*
798 * This structure is to hold information about the device
799 * configured to run FCoE protocol stack.
800 */
801struct netdev_fcoe_hbainfo {
802 char manufacturer[64];
803 char serial_number[64];
804 char hardware_version[64];
805 char driver_version[64];
806 char optionrom_version[64];
807 char firmware_version[64];
808 char model[256];
809 char model_description[256];
810};
811#endif
812
Jiri Pirko02637fc2014-11-28 14:34:16 +0100813#define MAX_PHYS_ITEM_ID_LEN 32
Jiri Pirko66b52b02013-07-29 18:16:49 +0200814
Jiri Pirko02637fc2014-11-28 14:34:16 +0100815/* This structure holds a unique identifier to identify some
816 * physical item (port for example) used by a netdevice.
Jiri Pirko66b52b02013-07-29 18:16:49 +0200817 */
Jiri Pirko02637fc2014-11-28 14:34:16 +0100818struct netdev_phys_item_id {
819 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
Jiri Pirko66b52b02013-07-29 18:16:49 +0200820 unsigned char id_len;
821};
822
Scott Feldmand754f982015-07-18 18:24:49 -0700823static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
824 struct netdev_phys_item_id *b)
825{
826 return a->id_len == b->id_len &&
827 memcmp(a->id, b->id, a->id_len) == 0;
828}
829
Daniel Borkmann99932d42014-02-16 15:55:20 +0100830typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
Alexander Duyck8ec56fc2018-07-09 12:20:04 -0400831 struct sk_buff *skb,
832 struct net_device *sb_dev);
Daniel Borkmann99932d42014-02-16 15:55:20 +0100833
Jiri Pirko2572ac52017-08-07 10:15:17 +0200834enum tc_setup_type {
Nogah Frankel575ed7d2017-11-06 07:23:42 +0100835 TC_SETUP_QDISC_MQPRIO,
John Fastabenda1b7c5f2016-02-16 21:17:09 -0800836 TC_SETUP_CLSU32,
Amir Vadai5b33f482016-03-08 12:42:29 +0200837 TC_SETUP_CLSFLOWER,
Jiri Pirkoade9b652017-08-07 10:15:18 +0200838 TC_SETUP_CLSMATCHALL,
Jakub Kicinski332ae8e2016-09-21 11:43:53 +0100839 TC_SETUP_CLSBPF,
Jiri Pirko8c4083b2017-10-19 15:50:29 +0200840 TC_SETUP_BLOCK,
Nogah Frankel8521db42017-11-06 07:23:43 +0100841 TC_SETUP_QDISC_CBS,
Nogah Frankel602f3ba2017-11-06 07:23:41 +0100842 TC_SETUP_QDISC_RED,
Nogah Frankel7fdb61b2018-01-14 12:33:15 +0100843 TC_SETUP_QDISC_PRIO,
Jakub Kicinskif971b132018-05-25 21:53:35 -0700844 TC_SETUP_QDISC_MQ,
Vinicius Costa Gomes25db26a2018-07-03 15:42:53 -0700845 TC_SETUP_QDISC_ETF,
Jakub Kicinski98b0e5f2018-11-12 14:58:10 -0800846 TC_SETUP_ROOT_QDISC,
Jakub Kicinski890d8d22018-11-19 15:21:42 -0800847 TC_SETUP_QDISC_GRED,
John Fastabend16e5cc62016-02-16 21:16:43 -0800848};
849
Jakub Kicinskif4e63522017-11-03 13:56:16 -0700850/* These structures hold the attributes of bpf state that are being passed
851 * to the netdevice through the bpf op.
Brenden Blancoa7862b42016-07-19 12:16:48 -0700852 */
Jakub Kicinskif4e63522017-11-03 13:56:16 -0700853enum bpf_netdev_command {
Brenden Blancoa7862b42016-07-19 12:16:48 -0700854 /* Set or clear a bpf program used in the earliest stages of packet
855 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
856 * is responsible for calling bpf_prog_put on any old progs that are
857 * stored. In case of error, the callee need not release the new prog
858 * reference, but on success it takes ownership and must bpf_prog_put
859 * when it is no longer used.
860 */
861 XDP_SETUP_PROG,
Jakub Kicinskiee5d0322017-06-21 18:25:04 -0700862 XDP_SETUP_PROG_HW,
Brenden Blancoa7862b42016-07-19 12:16:48 -0700863 XDP_QUERY_PROG,
Jakub Kicinskia25717d2018-07-11 20:36:41 -0700864 XDP_QUERY_PROG_HW,
Jakub Kicinskiab3f0062017-11-03 13:56:17 -0700865 /* BPF program for offload callbacks, invoked at program load time. */
Jakub Kicinskia3884572018-01-11 20:29:09 -0800866 BPF_OFFLOAD_MAP_ALLOC,
867 BPF_OFFLOAD_MAP_FREE,
Björn Töpel74515c52018-06-04 14:05:53 +0200868 XDP_SETUP_XSK_UMEM,
Brenden Blancoa7862b42016-07-19 12:16:48 -0700869};
870
Jakub Kicinskicae19272017-12-27 18:39:05 -0800871struct bpf_prog_offload_ops;
Jakub Kicinskiddf9f972017-04-30 21:46:46 -0700872struct netlink_ext_ack;
Björn Töpel74515c52018-06-04 14:05:53 +0200873struct xdp_umem;
Jakub Kicinskiddf9f972017-04-30 21:46:46 -0700874
Jakub Kicinskif4e63522017-11-03 13:56:16 -0700875struct netdev_bpf {
876 enum bpf_netdev_command command;
Brenden Blancoa7862b42016-07-19 12:16:48 -0700877 union {
878 /* XDP_SETUP_PROG */
Jakub Kicinskiddf9f972017-04-30 21:46:46 -0700879 struct {
Jakub Kicinski32d60272017-06-21 18:25:03 -0700880 u32 flags;
Jakub Kicinskiddf9f972017-04-30 21:46:46 -0700881 struct bpf_prog *prog;
882 struct netlink_ext_ack *extack;
883 };
Jakub Kicinskia25717d2018-07-11 20:36:41 -0700884 /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
Martin KaFai Lau58038692017-06-15 17:29:09 -0700885 struct {
Martin KaFai Lau58038692017-06-15 17:29:09 -0700886 u32 prog_id;
Jakub Kicinski92f02922017-12-01 15:08:56 -0800887 /* flags with which program was installed */
888 u32 prog_flags;
Martin KaFai Lau58038692017-06-15 17:29:09 -0700889 };
Jakub Kicinskia3884572018-01-11 20:29:09 -0800890 /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
891 struct {
892 struct bpf_offloaded_map *offmap;
893 };
Jan Sokolowskif8ebfaf2019-02-13 18:07:29 +0100894 /* XDP_SETUP_XSK_UMEM */
Björn Töpel74515c52018-06-04 14:05:53 +0200895 struct {
Jan Sokolowskif8ebfaf2019-02-13 18:07:29 +0100896 struct xdp_umem *umem;
897 u16 queue_id;
Björn Töpel74515c52018-06-04 14:05:53 +0200898 } xsk;
Brenden Blancoa7862b42016-07-19 12:16:48 -0700899 };
900};
John Fastabend16e5cc62016-02-16 21:16:43 -0800901
Steffen Klassertd77e38e2017-04-14 10:06:10 +0200902#ifdef CONFIG_XFRM_OFFLOAD
903struct xfrmdev_ops {
904 int (*xdo_dev_state_add) (struct xfrm_state *x);
905 void (*xdo_dev_state_delete) (struct xfrm_state *x);
906 void (*xdo_dev_state_free) (struct xfrm_state *x);
907 bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
908 struct xfrm_state *x);
Yossef Efraim50bd8702018-01-14 11:39:10 +0200909 void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
Steffen Klassertd77e38e2017-04-14 10:06:10 +0200910};
911#endif
912
Florian Westphal6c557002017-10-02 23:50:05 +0200913struct dev_ifalias {
914 struct rcu_head rcuhead;
915 char ifalias[];
916};
917
Jakub Kicinskib473b0d2019-02-25 19:34:03 -0800918struct devlink;
Jakub Kicinskida68b4a2019-04-25 12:32:03 -0700919struct tlsdev_ops;
Jakub Kicinskib473b0d2019-02-25 19:34:03 -0800920
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800921/*
922 * This structure defines the management hooks for network devices.
Stephen Hemminger00829822008-11-20 20:14:53 -0800923 * The following hooks can be defined; unless noted otherwise, they are
924 * optional and can be filled with a null pointer.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800925 *
926 * int (*ndo_init)(struct net_device *dev);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500927 * This function is called once when a network device is registered.
928 * The network device can use this for any late stage initialization
929 * or semantic validation. It can fail with an error code which will
930 * be propagated back to register_netdev.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800931 *
932 * void (*ndo_uninit)(struct net_device *dev);
933 * This function is called when device is unregistered or when registration
934 * fails. It is not called if init fails.
935 *
936 * int (*ndo_open)(struct net_device *dev);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500937 * This function is called when a network device transitions to the up
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800938 * state.
939 *
940 * int (*ndo_stop)(struct net_device *dev);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500941 * This function is called when a network device transitions to the down
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800942 * state.
943 *
Stephen Hemmingerdc1f8bf2009-08-31 19:50:40 +0000944 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
945 * struct net_device *dev);
Stephen Hemminger00829822008-11-20 20:14:53 -0800946 * Called when a packet needs to be transmitted.
Rusty Russelle79d8422015-04-03 22:17:17 +1030947 * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
948 * the queue before that can happen; it's for obsolete devices and weird
949 * corner cases, but the stack really does a non-trivial amount
950 * of useless work if you return NETDEV_TX_BUSY.
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500951 * Required; cannot be NULL.
Stephen Hemminger00829822008-11-20 20:14:53 -0800952 *
Dimitris Michailidis1a2a1442017-01-31 16:03:13 -0800953 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
954 * struct net_device *dev
955 * netdev_features_t features);
956 * Called by core transmit path to determine if device is capable of
957 * performing offload operations on a given packet. This is to give
958 * the device an opportunity to implement any restrictions that cannot
959 * be otherwise expressed by feature flags. The check is called with
960 * the set of features that the stack has calculated and it returns
961 * those the driver believes to be appropriate.
Eric Dumazetcdba7562016-01-06 06:53:50 -0800962 *
Jason Wangf663dd92014-01-10 16:18:26 +0800963 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
Paolo Abenia350ecc2019-03-20 11:02:06 +0100964 * struct net_device *sb_dev);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500965 * Called to decide which queue to use when device supports multiple
Stephen Hemminger00829822008-11-20 20:14:53 -0800966 * transmit queues.
967 *
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800968 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
969 * This function is called to allow device receiver to make
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500970 * changes to configuration when multicast or promiscuous is enabled.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800971 *
972 * void (*ndo_set_rx_mode)(struct net_device *dev);
973 * This function is called device changes address list filtering.
Jiri Pirko01789342011-08-16 06:29:00 +0000974 * If driver handles unicast address filtering, it should set
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500975 * IFF_UNICAST_FLT in its priv_flags.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800976 *
977 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
978 * This function is called when the Media Access Control address
Mike Rapoport37b607c2009-04-27 05:45:54 -0700979 * needs to be changed. If this interface is not defined, the
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500980 * MAC address can not be changed.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800981 *
982 * int (*ndo_validate_addr)(struct net_device *dev);
983 * Test if Media Access Control address is valid for the device.
984 *
985 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500986 * Called when a user requests an ioctl which can't be handled by
987 * the generic interface code. If not defined ioctls return
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800988 * not supported error code.
989 *
990 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
991 * Used to set network devices bus interface parameters. This interface
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -0500992 * is retained for legacy reasons; new devices should use the bus
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800993 * interface (PCI) for low level management.
994 *
995 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
996 * Called when a user wants to change the Maximum Transfer Unit
Magnus Dammdb46a0e2017-06-14 16:15:24 +0900997 * of a device.
Stephen Hemmingerd3147742008-11-19 21:32:24 -0800998 *
Stephen Hemminger00829822008-11-20 20:14:53 -0800999 * void (*ndo_tx_timeout)(struct net_device *dev);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001000 * Callback used when the transmitter has not made any progress
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001001 * for dev->watchdog ticks.
1002 *
stephen hemmingerbc1f4472017-01-06 19:12:52 -08001003 * void (*ndo_get_stats64)(struct net_device *dev,
1004 * struct rtnl_link_stats64 *storage);
Wolfram Sangd308e382009-10-07 13:53:11 -07001005 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001006 * Called when a user wants to get the network device usage
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +00001007 * statistics. Drivers must do one of the following:
Ben Hutchings3cfde792010-07-09 09:11:52 +00001008 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
1009 * rtnl_link_stats64 structure passed by the caller.
Ben Hutchings82695d92010-06-15 15:08:48 -07001010 * 2. Define @ndo_get_stats to update a net_device_stats structure
Ben Hutchingsbe1f3c22010-06-08 07:19:54 +00001011 * (which should normally be dev->stats) and return a pointer to
1012 * it. The structure may be changed asynchronously only if each
1013 * field is written atomically.
1014 * 3. Update dev->stats asynchronously and atomically, and define
1015 * neither operation.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001016 *
Or Gerlitz3df5b3c2016-11-22 23:09:54 +02001017 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
Nogah Frankel2c9d85d2016-09-16 15:05:36 +02001018 * Return true if this device supports offload stats of this attr_id.
1019 *
1020 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
1021 * void *attr_data)
1022 * Get statistics for offload operations by attr_id. Write it into the
1023 * attr_data pointer.
1024 *
B Viswanath5d632cb2015-01-12 14:46:25 +05301025 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001026 * If device supports VLAN filtering this function is called when a
Patrick McHardy80d5c362013-04-19 02:04:28 +00001027 * VLAN id is registered.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001028 *
B Viswanath5d632cb2015-01-12 14:46:25 +05301029 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001030 * If device supports VLAN filtering this function is called when a
Patrick McHardy80d5c362013-04-19 02:04:28 +00001031 * VLAN id is unregistered.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001032 *
1033 * void (*ndo_poll_controller)(struct net_device *dev);
Williams, Mitch A95c26df2010-02-10 01:43:46 +00001034 *
1035 * SR-IOV management functions.
1036 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
Moshe Shemesh79aab092016-09-22 12:11:15 +03001037 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
1038 * u8 qos, __be16 proto);
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001039 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
1040 * int max_tx_rate);
Greg Rose5f8444a2011-10-08 03:05:24 +00001041 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
Hiroshi Shimamotodd461d62015-08-28 06:57:55 +00001042 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
Williams, Mitch A95c26df2010-02-10 01:43:46 +00001043 * int (*ndo_get_vf_config)(struct net_device *dev,
1044 * int vf, struct ifla_vf_info *ivf);
Rony Efraim1d8faf42013-06-13 13:19:10 +03001045 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
Scott Feldman57b61082010-05-17 22:49:55 -07001046 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
1047 * struct nlattr *port[]);
Vlad Zolotarov01a3d792015-03-30 21:35:23 +03001048 *
1049 * Enable or disable the VF ability to query its RSS Redirection Table and
1050 * Hash Key. This is needed since on some devices VF share this information
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001051 * with PF and querying it may introduce a theoretical security risk.
Vlad Zolotarov01a3d792015-03-30 21:35:23 +03001052 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
Scott Feldman57b61082010-05-17 22:49:55 -07001053 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
Jiri Pirko2572ac52017-08-07 10:15:17 +02001054 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02001055 * void *type_data);
Florian Fainelli6a4bc2b2017-01-26 14:44:17 -08001056 * Called to setup any 'tc' scheduler, classifier or action on @dev.
1057 * This is always called from the stack with the rtnl lock held and netif
1058 * tx queues stopped. This allows the netdevice to perform queue
1059 * management safely.
Ben Hutchingsc4454772011-01-19 11:03:53 +00001060 *
Yi Zoue9bce842011-03-09 08:48:03 +00001061 * Fiber Channel over Ethernet (FCoE) offload functions.
1062 * int (*ndo_fcoe_enable)(struct net_device *dev);
1063 * Called when the FCoE protocol stack wants to start using LLD for FCoE
1064 * so the underlying device can perform whatever needed configuration or
1065 * initialization to support acceleration of FCoE traffic.
1066 *
1067 * int (*ndo_fcoe_disable)(struct net_device *dev);
1068 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
1069 * so the underlying device can perform whatever needed clean-ups to
1070 * stop supporting acceleration of FCoE traffic.
1071 *
1072 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
1073 * struct scatterlist *sgl, unsigned int sgc);
1074 * Called when the FCoE Initiator wants to initialize an I/O that
1075 * is a possible candidate for Direct Data Placement (DDP). The LLD can
1076 * perform necessary setup and returns 1 to indicate the device is set up
1077 * successfully to perform DDP on this I/O, otherwise this returns 0.
1078 *
1079 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
1080 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
1081 * indicated by the FC exchange id 'xid', so the underlying device can
1082 * clean up and reuse resources for later DDP requests.
1083 *
1084 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
1085 * struct scatterlist *sgl, unsigned int sgc);
1086 * Called when the FCoE Target wants to initialize an I/O that
1087 * is a possible candidate for Direct Data Placement (DDP). The LLD can
1088 * perform necessary setup and returns 1 to indicate the device is set up
1089 * successfully to perform DDP on this I/O, otherwise this returns 0.
1090 *
Neerav Parikh68bad942012-01-04 20:23:39 +00001091 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1092 * struct netdev_fcoe_hbainfo *hbainfo);
1093 * Called when the FCoE Protocol stack wants information on the underlying
1094 * device. This information is utilized by the FCoE protocol stack to
1095 * register attributes with Fiber Channel management service as per the
1096 * FC-GS Fabric Device Management Information(FDMI) specification.
1097 *
Yi Zoue9bce842011-03-09 08:48:03 +00001098 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
1099 * Called when the underlying device wants to override default World Wide
1100 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
1101 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1102 * protocol stack to use.
1103 *
Ben Hutchingsc4454772011-01-19 11:03:53 +00001104 * RFS acceleration.
1105 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1106 * u16 rxq_index, u32 flow_id);
1107 * Set hardware filter for RFS. rxq_index is the target queue index;
1108 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
1109 * Return the filter ID on success, or a negative error code.
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001110 *
Jiri Pirko8b98a702013-01-03 22:49:02 +00001111 * Slave management functions (for bridge, bonding, etc).
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001112 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
1113 * Called to make another netdev an underling.
1114 *
1115 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
1116 * Called to release previously enslaved netdev.
Michał Mirosław5455c692011-02-15 16:59:17 +00001117 *
1118 * Feature/offload setting functions.
Dimitris Michailidis1a2a1442017-01-31 16:03:13 -08001119 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1120 * netdev_features_t features);
1121 * Adjusts the requested feature flags according to device-specific
1122 * constraints, and returns the resulting flags. Must not modify
1123 * the device state.
1124 *
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001125 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
Michał Mirosław5455c692011-02-15 16:59:17 +00001126 * Called to update device configuration to new features. Passed
1127 * feature set might be less than what was returned by ndo_fix_features()).
1128 * Must return >0 or -errno if it changed dev->features itself.
1129 *
stephen hemmingeredc7d572012-10-01 12:32:33 +00001130 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
1131 * struct net_device *dev,
Petr Machata87b09842019-01-16 23:06:50 +00001132 * const unsigned char *addr, u16 vid, u16 flags,
1133 * struct netlink_ext_ack *extack);
John Fastabend77162022012-04-15 06:43:56 +00001134 * Adds an FDB entry to dev for addr.
Vlad Yasevich1690be62013-02-13 12:00:18 +00001135 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
1136 * struct net_device *dev,
Jiri Pirkof6f64242014-11-28 14:34:15 +01001137 * const unsigned char *addr, u16 vid)
John Fastabend77162022012-04-15 06:43:56 +00001138 * Deletes the FDB entry from dev coresponding to addr.
1139 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
Jamal Hadi Salim5d5eacb2014-07-10 07:01:58 -04001140 * struct net_device *dev, struct net_device *filter_dev,
Roopa Prabhud2976532016-08-30 21:56:45 -07001141 * int *idx)
John Fastabend77162022012-04-15 06:43:56 +00001142 * Used to add FDB entries to dump requests. Implementers should add
1143 * entries to skb and update idx with the number of entries.
John Fastabende5a55a82012-10-24 08:12:57 +00001144 *
Nicolas Dichtelad41faa2015-03-17 11:16:00 +01001145 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
Petr Machata2fd527b2018-12-12 17:02:48 +00001146 * u16 flags, struct netlink_ext_ack *extack)
John Fastabende5a55a82012-10-24 08:12:57 +00001147 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02001148 * struct net_device *dev, u32 filter_mask,
1149 * int nlflags)
Nicolas Dichtelad41faa2015-03-17 11:16:00 +01001150 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
1151 * u16 flags);
Jiri Pirko4bf84c32012-12-27 23:49:37 +00001152 *
1153 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
1154 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1155 * which do not represent real hardware may define this to allow their
1156 * userspace components to manage their virtual carrier state. Devices
1157 * that determine carrier state from physical hardware properties (eg
1158 * network cables) or protocol-dependent mechanisms (eg
1159 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
Jiri Pirko66b52b02013-07-29 18:16:49 +02001160 *
1161 * int (*ndo_get_phys_port_id)(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01001162 * struct netdev_phys_item_id *ppid);
Jiri Pirko66b52b02013-07-29 18:16:49 +02001163 * Called to get ID of physical port of this device. If driver does
1164 * not implement this, it is assumed that the hw is not able to have
1165 * multiple net devices on single physical port.
Joseph Gasparakis53cf52752013-09-04 02:13:38 -07001166 *
Florian Fainellid6abc5962019-02-06 09:45:35 -08001167 * int (*ndo_get_port_parent_id)(struct net_device *dev,
1168 * struct netdev_phys_item_id *ppid)
1169 * Called to get the parent ID of the physical port of this device.
1170 *
Alexander Duyck7c46a642016-06-16 12:21:00 -07001171 * void (*ndo_udp_tunnel_add)(struct net_device *dev,
1172 * struct udp_tunnel_info *ti);
1173 * Called by UDP tunnel to notify a driver about the UDP port and socket
1174 * address family that a UDP tunnel is listnening to. It is called only
1175 * when a new port starts listening. The operation is protected by the
1176 * RTNL.
1177 *
1178 * void (*ndo_udp_tunnel_del)(struct net_device *dev,
1179 * struct udp_tunnel_info *ti);
1180 * Called by UDP tunnel to notify the driver about a UDP port and socket
1181 * address family that the UDP tunnel is not listening to anymore. The
1182 * operation is protected by the RTNL.
1183 *
John Fastabenda6cc0cf2013-11-06 09:54:46 -08001184 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1185 * struct net_device *dev)
1186 * Called by upper layer devices to accelerate switching or other
1187 * station functionality into hardware. 'pdev is the lowerdev
1188 * to use for the offload and 'dev' is the net device that will
1189 * back the offload. Returns a pointer to the private structure
1190 * the upper layer will maintain.
1191 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
1192 * Called by upper layer device to delete the station created
1193 * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
1194 * the station and priv is the structure returned by the add
1195 * operation.
John Fastabend822b3b22015-03-18 14:57:33 +02001196 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1197 * int queue_index, u32 maxrate);
1198 * Called when a user wants to set a max-rate limitation of specific
1199 * TX queue.
Nicolas Dichtela54acb32015-04-02 17:07:00 +02001200 * int (*ndo_get_iflink)(const struct net_device *dev);
1201 * Called to get the iflink value of this device.
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07001202 * void (*ndo_change_proto_down)(struct net_device *dev,
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001203 * bool proto_down);
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07001204 * This function is used to pass protocol port error state information
1205 * to the switch driver. The switch driver can react to the proto_down
1206 * by doing a phys down on the associated switch port.
Pravin B Shelarfc4099f2015-10-22 18:17:16 -07001207 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1208 * This function is used to get egress tunnel information for given skb.
1209 * This is useful for retrieving outer tunnel header parameters while
1210 * sampling packet.
Paolo Abeni871b6422016-02-26 10:45:37 +01001211 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1212 * This function is used to specify the headroom that the skb must
1213 * consider when allocation skb during packet reception. Setting
1214 * appropriate rx headroom value allows avoiding skb head copy on
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001215 * forward. Setting a negative value resets the rx headroom to the
Paolo Abeni871b6422016-02-26 10:45:37 +01001216 * default value.
Jakub Kicinskif4e63522017-11-03 13:56:16 -07001217 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
Brenden Blancoa7862b42016-07-19 12:16:48 -07001218 * This function is used to set or query state related to XDP on the
Jakub Kicinskif4e63522017-11-03 13:56:16 -07001219 * netdevice and manage BPF offload. See definition of
1220 * enum bpf_netdev_command for details.
Jesper Dangaard Brouer42b33462018-05-31 10:59:47 +02001221 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
1222 * u32 flags);
Jesper Dangaard Brouer735fc402018-05-24 16:46:12 +02001223 * This function is used to submit @n XDP packets for transmit on a
1224 * netdevice. Returns number of frames successfully transmitted, frames
1225 * that got dropped are freed/returned via xdp_return_frame().
1226 * Returns negative number, means general error invoking ndo, meaning
1227 * no frames were xmit'ed and core-caller will free all frames.
Jiri Pirko5dc37bb2019-03-28 13:56:36 +01001228 * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
1229 * Get devlink port instance associated with a given netdev.
Jakub Kicinskib473b0d2019-02-25 19:34:03 -08001230 * Called with a reference on the netdevice and devlink locks only,
1231 * rtnl_lock is not held.
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001232 */
1233struct net_device_ops {
1234 int (*ndo_init)(struct net_device *dev);
1235 void (*ndo_uninit)(struct net_device *dev);
1236 int (*ndo_open)(struct net_device *dev);
1237 int (*ndo_stop)(struct net_device *dev);
Eric Dumazetcdba7562016-01-06 06:53:50 -08001238 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1239 struct net_device *dev);
1240 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1241 struct net_device *dev,
1242 netdev_features_t features);
Stephen Hemminger00829822008-11-20 20:14:53 -08001243 u16 (*ndo_select_queue)(struct net_device *dev,
Jason Wangf663dd92014-01-10 16:18:26 +08001244 struct sk_buff *skb,
Paolo Abenia350ecc2019-03-20 11:02:06 +01001245 struct net_device *sb_dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001246 void (*ndo_change_rx_flags)(struct net_device *dev,
1247 int flags);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001248 void (*ndo_set_rx_mode)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001249 int (*ndo_set_mac_address)(struct net_device *dev,
1250 void *addr);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001251 int (*ndo_validate_addr)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001252 int (*ndo_do_ioctl)(struct net_device *dev,
1253 struct ifreq *ifr, int cmd);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001254 int (*ndo_set_config)(struct net_device *dev,
1255 struct ifmap *map);
Stephen Hemminger00829822008-11-20 20:14:53 -08001256 int (*ndo_change_mtu)(struct net_device *dev,
1257 int new_mtu);
1258 int (*ndo_neigh_setup)(struct net_device *dev,
1259 struct neigh_parms *);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001260 void (*ndo_tx_timeout) (struct net_device *dev);
1261
stephen hemmingerbc1f4472017-01-06 19:12:52 -08001262 void (*ndo_get_stats64)(struct net_device *dev,
1263 struct rtnl_link_stats64 *storage);
Or Gerlitz3df5b3c2016-11-22 23:09:54 +02001264 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
Nogah Frankel2c9d85d2016-09-16 15:05:36 +02001265 int (*ndo_get_offload_stats)(int attr_id,
1266 const struct net_device *dev,
1267 void *attr_data);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001268 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1269
Jiri Pirko8e586132011-12-08 19:52:37 -05001270 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
Patrick McHardy80d5c362013-04-19 02:04:28 +00001271 __be16 proto, u16 vid);
Jiri Pirko8e586132011-12-08 19:52:37 -05001272 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
Patrick McHardy80d5c362013-04-19 02:04:28 +00001273 __be16 proto, u16 vid);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001274#ifdef CONFIG_NET_POLL_CONTROLLER
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001275 void (*ndo_poll_controller)(struct net_device *dev);
Herbert Xu4247e162010-06-10 16:12:47 +00001276 int (*ndo_netpoll_setup)(struct net_device *dev,
Eric W. Biedermana8779ec2014-03-27 15:36:38 -07001277 struct netpoll_info *info);
WANG Cong0e34e932010-05-06 00:47:21 -07001278 void (*ndo_netpoll_cleanup)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001279#endif
Williams, Mitch A95c26df2010-02-10 01:43:46 +00001280 int (*ndo_set_vf_mac)(struct net_device *dev,
1281 int queue, u8 *mac);
1282 int (*ndo_set_vf_vlan)(struct net_device *dev,
Moshe Shemesh79aab092016-09-22 12:11:15 +03001283 int queue, u16 vlan,
1284 u8 qos, __be16 proto);
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001285 int (*ndo_set_vf_rate)(struct net_device *dev,
1286 int vf, int min_tx_rate,
1287 int max_tx_rate);
Greg Rose5f8444a2011-10-08 03:05:24 +00001288 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1289 int vf, bool setting);
Hiroshi Shimamotodd461d62015-08-28 06:57:55 +00001290 int (*ndo_set_vf_trust)(struct net_device *dev,
1291 int vf, bool setting);
Williams, Mitch A95c26df2010-02-10 01:43:46 +00001292 int (*ndo_get_vf_config)(struct net_device *dev,
1293 int vf,
1294 struct ifla_vf_info *ivf);
Rony Efraim1d8faf42013-06-13 13:19:10 +03001295 int (*ndo_set_vf_link_state)(struct net_device *dev,
1296 int vf, int link_state);
Eran Ben Elisha3b766cd2015-06-15 17:59:07 +03001297 int (*ndo_get_vf_stats)(struct net_device *dev,
1298 int vf,
1299 struct ifla_vf_stats
1300 *vf_stats);
Scott Feldman57b61082010-05-17 22:49:55 -07001301 int (*ndo_set_vf_port)(struct net_device *dev,
1302 int vf,
1303 struct nlattr *port[]);
1304 int (*ndo_get_vf_port)(struct net_device *dev,
1305 int vf, struct sk_buff *skb);
Eli Cohencc8e27c2016-03-11 22:58:34 +02001306 int (*ndo_set_vf_guid)(struct net_device *dev,
1307 int vf, u64 guid,
1308 int guid_type);
Vlad Zolotarov01a3d792015-03-30 21:35:23 +03001309 int (*ndo_set_vf_rss_query_en)(
1310 struct net_device *dev,
1311 int vf, bool setting);
John Fastabend16e5cc62016-02-16 21:16:43 -08001312 int (*ndo_setup_tc)(struct net_device *dev,
Jiri Pirko2572ac52017-08-07 10:15:17 +02001313 enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02001314 void *type_data);
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001315#if IS_ENABLED(CONFIG_FCOE)
Yi Zoucb454392009-08-31 12:31:36 +00001316 int (*ndo_fcoe_enable)(struct net_device *dev);
1317 int (*ndo_fcoe_disable)(struct net_device *dev);
Yi Zou4d288d52009-02-27 14:06:59 -08001318 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1319 u16 xid,
1320 struct scatterlist *sgl,
1321 unsigned int sgc);
1322 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1323 u16 xid);
Yi Zou6247e082011-02-01 07:22:06 +00001324 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1325 u16 xid,
1326 struct scatterlist *sgl,
1327 unsigned int sgc);
Neerav Parikh68bad942012-01-04 20:23:39 +00001328 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1329 struct netdev_fcoe_hbainfo *hbainfo);
Bhanu Prakash Gollapudi3c9c36bc2011-08-26 09:45:41 +00001330#endif
1331
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001332#if IS_ENABLED(CONFIG_LIBFCOE)
Yi Zoudf5c7942009-10-28 18:24:35 +00001333#define NETDEV_FCOE_WWNN 0
1334#define NETDEV_FCOE_WWPN 1
1335 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1336 u64 *wwn, int type);
Yi Zou4d288d52009-02-27 14:06:59 -08001337#endif
Bhanu Prakash Gollapudi3c9c36bc2011-08-26 09:45:41 +00001338
Ben Hutchingsc4454772011-01-19 11:03:53 +00001339#ifdef CONFIG_RFS_ACCEL
1340 int (*ndo_rx_flow_steer)(struct net_device *dev,
1341 const struct sk_buff *skb,
1342 u16 rxq_index,
1343 u32 flow_id);
1344#endif
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001345 int (*ndo_add_slave)(struct net_device *dev,
David Ahern33eaf2a2017-10-04 17:48:46 -07001346 struct net_device *slave_dev,
1347 struct netlink_ext_ack *extack);
Jiri Pirkofbaec0e2011-02-13 10:15:37 +00001348 int (*ndo_del_slave)(struct net_device *dev,
1349 struct net_device *slave_dev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001350 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1351 netdev_features_t features);
Michał Mirosław5455c692011-02-15 16:59:17 +00001352 int (*ndo_set_features)(struct net_device *dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001353 netdev_features_t features);
Jiri Pirko503eebc2016-07-05 11:27:37 +02001354 int (*ndo_neigh_construct)(struct net_device *dev,
1355 struct neighbour *n);
1356 void (*ndo_neigh_destroy)(struct net_device *dev,
1357 struct neighbour *n);
John Fastabend77162022012-04-15 06:43:56 +00001358
1359 int (*ndo_fdb_add)(struct ndmsg *ndm,
stephen hemmingeredc7d572012-10-01 12:32:33 +00001360 struct nlattr *tb[],
John Fastabend77162022012-04-15 06:43:56 +00001361 struct net_device *dev,
stephen hemminger6b6e2722012-09-17 10:03:26 +00001362 const unsigned char *addr,
Jiri Pirkof6f64242014-11-28 14:34:15 +01001363 u16 vid,
Petr Machata87b09842019-01-16 23:06:50 +00001364 u16 flags,
1365 struct netlink_ext_ack *extack);
John Fastabend77162022012-04-15 06:43:56 +00001366 int (*ndo_fdb_del)(struct ndmsg *ndm,
Vlad Yasevich1690be62013-02-13 12:00:18 +00001367 struct nlattr *tb[],
John Fastabend77162022012-04-15 06:43:56 +00001368 struct net_device *dev,
Jiri Pirkof6f64242014-11-28 14:34:15 +01001369 const unsigned char *addr,
1370 u16 vid);
John Fastabend77162022012-04-15 06:43:56 +00001371 int (*ndo_fdb_dump)(struct sk_buff *skb,
1372 struct netlink_callback *cb,
1373 struct net_device *dev,
Jamal Hadi Salim5d5eacb2014-07-10 07:01:58 -04001374 struct net_device *filter_dev,
Roopa Prabhud2976532016-08-30 21:56:45 -07001375 int *idx);
Roopa Prabhu5b2f94b2018-12-15 22:35:08 -08001376 int (*ndo_fdb_get)(struct sk_buff *skb,
1377 struct nlattr *tb[],
1378 struct net_device *dev,
1379 const unsigned char *addr,
1380 u16 vid, u32 portid, u32 seq,
1381 struct netlink_ext_ack *extack);
John Fastabende5a55a82012-10-24 08:12:57 +00001382 int (*ndo_bridge_setlink)(struct net_device *dev,
Roopa Prabhuadd511b2015-01-29 22:40:12 -08001383 struct nlmsghdr *nlh,
Petr Machata2fd527b2018-12-12 17:02:48 +00001384 u16 flags,
1385 struct netlink_ext_ack *extack);
John Fastabende5a55a82012-10-24 08:12:57 +00001386 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1387 u32 pid, u32 seq,
Vlad Yasevich6cbdcee2013-02-13 12:00:13 +00001388 struct net_device *dev,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02001389 u32 filter_mask,
1390 int nlflags);
Vlad Yasevich407af322013-02-13 12:00:12 +00001391 int (*ndo_bridge_dellink)(struct net_device *dev,
Roopa Prabhuadd511b2015-01-29 22:40:12 -08001392 struct nlmsghdr *nlh,
1393 u16 flags);
Jiri Pirko4bf84c32012-12-27 23:49:37 +00001394 int (*ndo_change_carrier)(struct net_device *dev,
1395 bool new_carrier);
Jiri Pirko66b52b02013-07-29 18:16:49 +02001396 int (*ndo_get_phys_port_id)(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01001397 struct netdev_phys_item_id *ppid);
Florian Fainellid6abc5962019-02-06 09:45:35 -08001398 int (*ndo_get_port_parent_id)(struct net_device *dev,
1399 struct netdev_phys_item_id *ppid);
David Aherndb24a902015-03-17 20:23:15 -06001400 int (*ndo_get_phys_port_name)(struct net_device *dev,
1401 char *name, size_t len);
Alexander Duyck7c46a642016-06-16 12:21:00 -07001402 void (*ndo_udp_tunnel_add)(struct net_device *dev,
1403 struct udp_tunnel_info *ti);
1404 void (*ndo_udp_tunnel_del)(struct net_device *dev,
1405 struct udp_tunnel_info *ti);
John Fastabenda6cc0cf2013-11-06 09:54:46 -08001406 void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1407 struct net_device *dev);
1408 void (*ndo_dfwd_del_station)(struct net_device *pdev,
1409 void *priv);
1410
Vlad Yasevich25175ba2014-05-16 17:04:54 -04001411 int (*ndo_get_lock_subclass)(struct net_device *dev);
John Fastabend822b3b22015-03-18 14:57:33 +02001412 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1413 int queue_index,
1414 u32 maxrate);
Nicolas Dichtela54acb32015-04-02 17:07:00 +02001415 int (*ndo_get_iflink)(const struct net_device *dev);
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07001416 int (*ndo_change_proto_down)(struct net_device *dev,
1417 bool proto_down);
Pravin B Shelarfc4099f2015-10-22 18:17:16 -07001418 int (*ndo_fill_metadata_dst)(struct net_device *dev,
1419 struct sk_buff *skb);
Paolo Abeni871b6422016-02-26 10:45:37 +01001420 void (*ndo_set_rx_headroom)(struct net_device *dev,
1421 int needed_headroom);
Jakub Kicinskif4e63522017-11-03 13:56:16 -07001422 int (*ndo_bpf)(struct net_device *dev,
1423 struct netdev_bpf *bpf);
Jesper Dangaard Brouer735fc402018-05-24 16:46:12 +02001424 int (*ndo_xdp_xmit)(struct net_device *dev, int n,
Jesper Dangaard Brouer42b33462018-05-31 10:59:47 +02001425 struct xdp_frame **xdp,
1426 u32 flags);
Magnus Karlssone3760c72018-06-04 14:05:56 +02001427 int (*ndo_xsk_async_xmit)(struct net_device *dev,
1428 u32 queue_id);
Jiri Pirko5dc37bb2019-03-28 13:56:36 +01001429 struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001430};
1431
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001432/**
1433 * enum net_device_priv_flags - &struct net_device priv_flags
1434 *
1435 * These are the &struct net_device, they are only set internally
1436 * by drivers and used in the kernel. These flags are invisible to
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001437 * userspace; this means that the order of these flags can change
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001438 * during any kernel release.
1439 *
1440 * You should have a pretty good reason to be extending these flags.
1441 *
1442 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1443 * @IFF_EBRIDGE: Ethernet bridging device
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001444 * @IFF_BONDING: bonding master or slave
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001445 * @IFF_ISATAP: ISATAP interface (RFC4214)
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001446 * @IFF_WAN_HDLC: WAN HDLC device
1447 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1448 * release skb->dst
1449 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1450 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1451 * @IFF_MACVLAN_PORT: device used as macvlan port
1452 * @IFF_BRIDGE_PORT: device used as bridge port
1453 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1454 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1455 * @IFF_UNICAST_FLT: Supports unicast filtering
1456 * @IFF_TEAM_PORT: device used as team port
1457 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1458 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1459 * change when it's running
1460 * @IFF_MACVLAN: Macvlan device
Luis de Bethencourt6d0e24c2016-03-21 20:58:28 +00001461 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1462 * underlying stacked devices
David Ahern007979e2015-09-29 20:07:10 -07001463 * @IFF_L3MDEV_MASTER: device is an L3 master device
Phil Sutterfa8187c2015-08-13 19:01:06 +02001464 * @IFF_NO_QUEUE: device can run without qdisc attached
Jiri Pirko35d4e172015-08-27 09:31:20 +02001465 * @IFF_OPENVSWITCH: device is a Open vSwitch master
David Ahernfee6d4c2015-10-05 08:51:24 -07001466 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
Jiri Pirkoc981e422015-12-03 12:12:06 +01001467 * @IFF_TEAM: device is a team device
Keller, Jacob Ed4ab4282016-02-08 16:05:03 -08001468 * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
Paolo Abeni871b6422016-02-26 10:45:37 +01001469 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1470 * entity (i.e. the master device for bridged veth)
Sabrina Dubroca3c175782016-03-11 18:07:32 +01001471 * @IFF_MACSEC: device is a MACsec device
Paolo Abenif54262502018-03-09 10:39:24 +01001472 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
Sridhar Samudrala30c8bd52018-05-24 09:55:13 -07001473 * @IFF_FAILOVER: device is a failover master device
1474 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
Daniel Borkmannd5256082019-01-30 12:49:48 +01001475 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
Si-Wei Liu8065a772019-04-08 19:45:27 -04001476 * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001477 */
1478enum netdev_priv_flags {
1479 IFF_802_1Q_VLAN = 1<<0,
1480 IFF_EBRIDGE = 1<<1,
Jiri Pirko0dc15492015-08-27 09:31:21 +02001481 IFF_BONDING = 1<<2,
1482 IFF_ISATAP = 1<<3,
1483 IFF_WAN_HDLC = 1<<4,
1484 IFF_XMIT_DST_RELEASE = 1<<5,
1485 IFF_DONT_BRIDGE = 1<<6,
1486 IFF_DISABLE_NETPOLL = 1<<7,
1487 IFF_MACVLAN_PORT = 1<<8,
1488 IFF_BRIDGE_PORT = 1<<9,
1489 IFF_OVS_DATAPATH = 1<<10,
1490 IFF_TX_SKB_SHARING = 1<<11,
1491 IFF_UNICAST_FLT = 1<<12,
1492 IFF_TEAM_PORT = 1<<13,
1493 IFF_SUPP_NOFCS = 1<<14,
1494 IFF_LIVE_ADDR_CHANGE = 1<<15,
1495 IFF_MACVLAN = 1<<16,
1496 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
Paolo Abeni1ec54cb2018-03-06 10:56:31 +01001497 IFF_L3MDEV_MASTER = 1<<18,
1498 IFF_NO_QUEUE = 1<<19,
1499 IFF_OPENVSWITCH = 1<<20,
1500 IFF_L3MDEV_SLAVE = 1<<21,
1501 IFF_TEAM = 1<<22,
1502 IFF_RXFH_CONFIGURED = 1<<23,
1503 IFF_PHONY_HEADROOM = 1<<24,
1504 IFF_MACSEC = 1<<25,
Paolo Abenif54262502018-03-09 10:39:24 +01001505 IFF_NO_RX_HANDLER = 1<<26,
Sridhar Samudrala30c8bd52018-05-24 09:55:13 -07001506 IFF_FAILOVER = 1<<27,
1507 IFF_FAILOVER_SLAVE = 1<<28,
Daniel Borkmannd5256082019-01-30 12:49:48 +01001508 IFF_L3MDEV_RX_HANDLER = 1<<29,
Si-Wei Liu8065a772019-04-08 19:45:27 -04001509 IFF_LIVE_RENAME_OK = 1<<30,
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001510};
1511
1512#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1513#define IFF_EBRIDGE IFF_EBRIDGE
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001514#define IFF_BONDING IFF_BONDING
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001515#define IFF_ISATAP IFF_ISATAP
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001516#define IFF_WAN_HDLC IFF_WAN_HDLC
1517#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1518#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1519#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1520#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1521#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1522#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1523#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1524#define IFF_UNICAST_FLT IFF_UNICAST_FLT
1525#define IFF_TEAM_PORT IFF_TEAM_PORT
1526#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1527#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1528#define IFF_MACVLAN IFF_MACVLAN
Eric Dumazet02875872014-10-05 18:38:35 -07001529#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
David Ahern007979e2015-09-29 20:07:10 -07001530#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
Phil Sutterfa8187c2015-08-13 19:01:06 +02001531#define IFF_NO_QUEUE IFF_NO_QUEUE
Jiri Pirko35d4e172015-08-27 09:31:20 +02001532#define IFF_OPENVSWITCH IFF_OPENVSWITCH
Jiri Pirko8f253482015-11-04 14:59:06 +01001533#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
Jiri Pirkoc981e422015-12-03 12:12:06 +01001534#define IFF_TEAM IFF_TEAM
Keller, Jacob Ed4ab4282016-02-08 16:05:03 -08001535#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
Sabrina Dubroca3c175782016-03-11 18:07:32 +01001536#define IFF_MACSEC IFF_MACSEC
Paolo Abenif54262502018-03-09 10:39:24 +01001537#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
Sridhar Samudrala30c8bd52018-05-24 09:55:13 -07001538#define IFF_FAILOVER IFF_FAILOVER
1539#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
Daniel Borkmannd5256082019-01-30 12:49:48 +01001540#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
Si-Wei Liu8065a772019-04-08 19:45:27 -04001541#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
Luis R. Rodriguez7aa98042014-02-25 17:15:13 -08001542
Karoly Kemeny536721b2014-07-30 20:27:36 +02001543/**
1544 * struct net_device - The DEVICE structure.
Mauro Carvalho Chehabd6519832017-05-12 09:35:46 -03001545 *
1546 * Actually, this whole structure is a big mistake. It mixes I/O
1547 * data with strictly "high-level" data, and it has to know about
1548 * almost every data structure used in the INET module.
Karoly Kemeny536721b2014-07-30 20:27:36 +02001549 *
1550 * @name: This is the first field of the "visible" part of this structure
1551 * (i.e. as seen by users in the "Space.c" file). It is the name
Mauro Carvalho Chehabd6519832017-05-12 09:35:46 -03001552 * of the interface.
Karoly Kemeny536721b2014-07-30 20:27:36 +02001553 *
1554 * @name_hlist: Device name hash chain, please keep it close to name[]
1555 * @ifalias: SNMP alias
1556 * @mem_end: Shared memory end
1557 * @mem_start: Shared memory start
1558 * @base_addr: Device I/O address
1559 * @irq: Device IRQ number
1560 *
1561 * @state: Generic network queuing layer state, see netdev_state_t
1562 * @dev_list: The global list of network devices
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001563 * @napi_list: List entry used for polling NAPI devices
1564 * @unreg_list: List entry when we are unregistering the
1565 * device; see the function unregister_netdev
1566 * @close_list: List entry used when we are closing the device
Benjamin Poirier62d885fe2016-03-21 14:08:28 -07001567 * @ptype_all: Device-specific packet handlers for all protocols
1568 * @ptype_specific: Device-specific, protocol-specific packet handlers
Karoly Kemeny536721b2014-07-30 20:27:36 +02001569 *
1570 * @adj_list: Directly linked devices, like slaves for bonding
Karoly Kemeny536721b2014-07-30 20:27:36 +02001571 * @features: Currently active device features
1572 * @hw_features: User-changeable features
1573 *
1574 * @wanted_features: User-requested features
1575 * @vlan_features: Mask of features inheritable by VLAN devices
1576 *
1577 * @hw_enc_features: Mask of features inherited by encapsulating devices
1578 * This field indicates what encapsulation
1579 * offloads the hardware is capable of doing,
1580 * and drivers will need to set them appropriately.
1581 *
1582 * @mpls_features: Mask of features inheritable by MPLS
1583 *
1584 * @ifindex: interface index
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001585 * @group: The group the device belongs to
Karoly Kemeny536721b2014-07-30 20:27:36 +02001586 *
1587 * @stats: Statistics struct, which was left as a legacy, use
1588 * rtnl_link_stats64 instead
1589 *
1590 * @rx_dropped: Dropped packets by core network,
1591 * do not use this in drivers
1592 * @tx_dropped: Dropped packets by core network,
1593 * do not use this in drivers
Jarod Wilson6e7333d2016-02-01 18:51:05 -05001594 * @rx_nohandler: nohandler dropped packets by core network on
1595 * inactive devices, do not use this in drivers
Florian Fainelli9e55e5d2018-01-22 19:14:25 -08001596 * @carrier_up_count: Number of times the carrier has been up
1597 * @carrier_down_count: Number of times the carrier has been down
Karoly Kemeny536721b2014-07-30 20:27:36 +02001598 *
Karoly Kemeny536721b2014-07-30 20:27:36 +02001599 * @wireless_handlers: List of functions to handle Wireless Extensions,
1600 * instead of ioctl,
1601 * see <net/iw_handler.h> for details.
1602 * @wireless_data: Instance data managed by the core of wireless extensions
1603 *
1604 * @netdev_ops: Includes several pointers to callbacks,
1605 * if one wants to override the ndo_*() functions
1606 * @ethtool_ops: Management operations
Alexander Aringf997c552016-06-15 21:20:23 +02001607 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1608 * discovery handling. Necessary for e.g. 6LoWPAN.
Eric W. Biedermand4760592015-03-02 00:11:09 -06001609 * @header_ops: Includes callbacks for creating,parsing,caching,etc
Karoly Kemeny536721b2014-07-30 20:27:36 +02001610 * of Layer 2 headers.
1611 *
1612 * @flags: Interface flags (a la BSD)
1613 * @priv_flags: Like 'flags' but invisible to userspace,
1614 * see if.h for the definitions
1615 * @gflags: Global flags ( kept as legacy )
1616 * @padded: How much padding added by alloc_netdev()
1617 * @operstate: RFC2863 operstate
1618 * @link_mode: Mapping policy to operstate
1619 * @if_port: Selectable AUI, TP, ...
1620 * @dma: DMA channel
1621 * @mtu: Interface MTU value
Jarod Wilson61e84622016-10-07 22:04:33 -04001622 * @min_mtu: Interface Minimum MTU value
1623 * @max_mtu: Interface Maximum MTU value
Karoly Kemeny536721b2014-07-30 20:27:36 +02001624 * @type: Interface hardware type
Willem de Bruijn2793a232016-03-09 21:58:32 -05001625 * @hard_header_len: Maximum hardware header length.
Willem de Bruijn217e6fa2017-02-07 15:57:20 -05001626 * @min_header_len: Minimum hardware header length
Karoly Kemeny536721b2014-07-30 20:27:36 +02001627 *
1628 * @needed_headroom: Extra headroom the hardware may need, but not in all
1629 * cases can this be guaranteed
1630 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
1631 * cases can this be guaranteed. Some cases also use
1632 * LL_MAX_HEADER instead to allocate the skb
1633 *
1634 * interface address info:
1635 *
1636 * @perm_addr: Permanent hw address
1637 * @addr_assign_type: Hw address assignment type
1638 * @addr_len: Hardware address length
Alexander Aring8626a0c2016-06-15 21:20:16 +02001639 * @neigh_priv_len: Used in neigh_alloc()
Karoly Kemeny536721b2014-07-30 20:27:36 +02001640 * @dev_id: Used to differentiate devices that share
1641 * the same link layer address
1642 * @dev_port: Used to differentiate devices that share
1643 * the same function
1644 * @addr_list_lock: XXX: need comments on this one
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001645 * @uc_promisc: Counter that indicates promiscuous mode
Karoly Kemeny536721b2014-07-30 20:27:36 +02001646 * has been enabled due to the need to listen to
1647 * additional unicast addresses in a device that
1648 * does not implement ndo_set_rx_mode()
Thomas Graf14ffbbb2015-04-10 15:52:38 +02001649 * @uc: unicast mac addresses
1650 * @mc: multicast mac addresses
1651 * @dev_addrs: list of device hw addresses
1652 * @queues_kset: Group of all Kobjects in the Tx and RX queues
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001653 * @promiscuity: Number of times the NIC is told to work in
1654 * promiscuous mode; if it becomes 0 the NIC will
1655 * exit promiscuous mode
Karoly Kemeny536721b2014-07-30 20:27:36 +02001656 * @allmulti: Counter, enables or disables allmulticast mode
1657 *
1658 * @vlan_info: VLAN info
1659 * @dsa_ptr: dsa specific data
1660 * @tipc_ptr: TIPC specific data
1661 * @atalk_ptr: AppleTalk link
1662 * @ip_ptr: IPv4 specific data
1663 * @dn_ptr: DECnet specific data
1664 * @ip6_ptr: IPv6 specific data
1665 * @ax25_ptr: AX.25 specific data
1666 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1667 *
Karoly Kemeny536721b2014-07-30 20:27:36 +02001668 * @dev_addr: Hw address (before bcast,
1669 * because most packets are unicast)
1670 *
1671 * @_rx: Array of RX queues
1672 * @num_rx_queues: Number of RX queues
1673 * allocated at register_netdev() time
1674 * @real_num_rx_queues: Number of RX queues currently active in device
1675 *
1676 * @rx_handler: handler for received packets
1677 * @rx_handler_data: XXX: need comments on this one
Jiri Pirko46209402017-11-03 11:46:25 +01001678 * @miniq_ingress: ingress/clsact qdisc specific data for
1679 * ingress processing
Karoly Kemeny536721b2014-07-30 20:27:36 +02001680 * @ingress_queue: XXX: need comments on this one
1681 * @broadcast: hw bcast address
1682 *
Thomas Graf14ffbbb2015-04-10 15:52:38 +02001683 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1684 * indexed by RX queue number. Assigned by driver.
1685 * This must only be set if the ndo_rx_flow_steer
1686 * operation is defined
1687 * @index_hlist: Device index hash chain
1688 *
Karoly Kemeny536721b2014-07-30 20:27:36 +02001689 * @_tx: Array of TX queues
1690 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1691 * @real_num_tx_queues: Number of TX queues currently active in device
1692 * @qdisc: Root qdisc from userspace point of view
1693 * @tx_queue_len: Max frames per queue allowed
1694 * @tx_global_lock: XXX: need comments on this one
1695 *
1696 * @xps_maps: XXX: need comments on this one
Jiri Pirko46209402017-11-03 11:46:25 +01001697 * @miniq_egress: clsact qdisc specific data for
1698 * egress processing
Karoly Kemeny536721b2014-07-30 20:27:36 +02001699 * @watchdog_timeo: Represents the timeout that is used by
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001700 * the watchdog (see dev_watchdog())
Karoly Kemeny536721b2014-07-30 20:27:36 +02001701 * @watchdog_timer: List of timers
1702 *
1703 * @pcpu_refcnt: Number of references to this device
1704 * @todo_list: Delayed register/unregister
Karoly Kemeny536721b2014-07-30 20:27:36 +02001705 * @link_watch_list: XXX: need comments on this one
1706 *
1707 * @reg_state: Register/unregister state machine
1708 * @dismantle: Device is going to be freed
1709 * @rtnl_link_state: This enum represents the phases of creating
1710 * a new link
1711 *
David S. Millercf124db2017-05-08 12:52:56 -04001712 * @needs_free_netdev: Should unregister perform free_netdev?
1713 * @priv_destructor: Called from unregister
Karoly Kemeny536721b2014-07-30 20:27:36 +02001714 * @npinfo: XXX: need comments on this one
1715 * @nd_net: Network namespace this network device is inside
1716 *
1717 * @ml_priv: Mid-layer private
1718 * @lstats: Loopback statistics
1719 * @tstats: Tunnel statistics
1720 * @dstats: Dummy statistics
1721 * @vstats: Virtual ethernet statistics
1722 *
1723 * @garp_port: GARP
1724 * @mrp_port: MRP
1725 *
1726 * @dev: Class/net/name entry
1727 * @sysfs_groups: Space for optional device, statistics and wireless
1728 * sysfs groups
1729 *
1730 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1731 * @rtnl_link_ops: Rtnl_link_ops
1732 *
1733 * @gso_max_size: Maximum size of generic segmentation offload
1734 * @gso_max_segs: Maximum number of segments that can be passed to the
1735 * NIC for GSO
1736 *
1737 * @dcbnl_ops: Data Center Bridging netlink ops
1738 * @num_tc: Number of traffic classes in the net device
1739 * @tc_to_txq: XXX: need comments on this one
Randy Dunlap920c1cd2016-11-21 18:28:36 -08001740 * @prio_tc_map: XXX: need comments on this one
Karoly Kemeny536721b2014-07-30 20:27:36 +02001741 *
1742 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1743 *
1744 * @priomap: XXX: need comments on this one
1745 * @phydev: Physical device may attach itself
1746 * for hardware timestamping
Russell Kinge679c9c2018-03-28 15:44:16 -07001747 * @sfp_bus: attached &struct sfp_bus structure.
Karoly Kemeny536721b2014-07-30 20:27:36 +02001748 *
Eric Dumazet123b3652016-06-08 07:22:49 -07001749 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1750 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
Karoly Kemeny536721b2014-07-30 20:27:36 +02001751 *
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07001752 * @proto_down: protocol port state information can be sent to the
1753 * switch driver and used to set the phys state of the
1754 * switch port.
1755 *
Heiner Kallweit61941142018-09-24 21:58:59 +02001756 * @wol_enabled: Wake-on-LAN is enabled
1757 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 * FIXME: cleanup struct net_device such that network protocol info
1759 * moves out.
1760 */
1761
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08001762struct net_device {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 char name[IFNAMSIZ];
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001764 struct hlist_node name_hlist;
Florian Westphal6c557002017-10-02 23:50:05 +02001765 struct dev_ifalias __rcu *ifalias;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 /*
1767 * I/O specific fields
1768 * FIXME: Merge these and struct ifmap into one
1769 */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001770 unsigned long mem_end;
1771 unsigned long mem_start;
1772 unsigned long base_addr;
1773 int irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774
1775 /*
Karoly Kemeny536721b2014-07-30 20:27:36 +02001776 * Some hardware also needs these fields (state,dev_list,
1777 * napi_list,unreg_list,close_list) but they are not
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 * part of the usual set specified in Space.c.
1779 */
1780
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 unsigned long state;
1782
Pavel Emelianov7562f872007-05-03 15:13:45 -07001783 struct list_head dev_list;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001784 struct list_head napi_list;
Eric Dumazet44a08732009-10-27 07:03:04 +00001785 struct list_head unreg_list;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001786 struct list_head close_list;
Salam Noureddine7866a622015-01-27 11:35:48 -08001787 struct list_head ptype_all;
1788 struct list_head ptype_specific;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02001789
Veaceslav Falico2f268f12013-09-25 09:20:07 +02001790 struct {
1791 struct list_head upper;
1792 struct list_head lower;
1793 } adj_list;
1794
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001795 netdev_features_t features;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001796 netdev_features_t hw_features;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001797 netdev_features_t wanted_features;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001798 netdev_features_t vlan_features;
Joseph Gasparakis6a674e92012-12-07 14:14:14 +00001799 netdev_features_t hw_enc_features;
Simon Horman0d89d202013-05-23 21:02:52 +00001800 netdev_features_t mpls_features;
Alexander Duyck802ab552016-04-10 21:45:03 -04001801 netdev_features_t gso_partial_features;
Michał Mirosław04ed3e72011-01-24 15:32:47 -08001802
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 int ifindex;
Nicolas Dichtel7a66bbc2015-04-02 17:07:09 +02001804 int group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805
Rusty Russellc45d2862007-03-28 14:29:08 -07001806 struct net_device_stats stats;
Eric Dumazet015f0682014-03-27 08:45:56 -07001807
Eric Dumazet015f0682014-03-27 08:45:56 -07001808 atomic_long_t rx_dropped;
1809 atomic_long_t tx_dropped;
Jarod Wilson6e7333d2016-02-01 18:51:05 -05001810 atomic_long_t rx_nohandler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811
David Decotignyb2d3bcf2018-01-18 09:59:13 -08001812 /* Stats to monitor link on/off, flapping */
1813 atomic_t carrier_up_count;
1814 atomic_t carrier_down_count;
1815
Johannes Bergb86e0282007-04-26 20:48:23 -07001816#ifdef CONFIG_WIRELESS_EXT
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001817 const struct iw_handler_def *wireless_handlers;
1818 struct iw_public_data *wireless_data;
Johannes Bergb86e0282007-04-26 20:48:23 -07001819#endif
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001820 const struct net_device_ops *netdev_ops;
Stephen Hemminger76fd8592006-09-08 11:16:13 -07001821 const struct ethtool_ops *ethtool_ops;
David Ahern1b69c6d2015-09-29 20:07:11 -07001822#ifdef CONFIG_NET_L3_MASTER_DEV
1823 const struct l3mdev_ops *l3mdev_ops;
1824#endif
Alexander Aringf997c552016-06-15 21:20:23 +02001825#if IS_ENABLED(CONFIG_IPV6)
1826 const struct ndisc_ops *ndisc_ops;
1827#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
Shannon Nelson9cb0d212017-12-19 15:35:49 -08001829#ifdef CONFIG_XFRM_OFFLOAD
Steffen Klassertd77e38e2017-04-14 10:06:10 +02001830 const struct xfrmdev_ops *xfrmdev_ops;
1831#endif
1832
Ilya Lesokhina5c37c62018-04-30 10:16:13 +03001833#if IS_ENABLED(CONFIG_TLS_DEVICE)
1834 const struct tlsdev_ops *tlsdev_ops;
1835#endif
1836
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001837 const struct header_ops *header_ops;
1838
Karoly Kemeny536721b2014-07-30 20:27:36 +02001839 unsigned int flags;
1840 unsigned int priv_flags;
1841
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 unsigned short gflags;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001843 unsigned short padded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
Karoly Kemeny536721b2014-07-30 20:27:36 +02001845 unsigned char operstate;
1846 unsigned char link_mode;
Stefan Rompfb00055a2006-03-20 17:09:11 -08001847
Karoly Kemeny536721b2014-07-30 20:27:36 +02001848 unsigned char if_port;
1849 unsigned char dma;
Joe Perchesbdc220d2011-05-09 17:42:46 +00001850
Karoly Kemeny536721b2014-07-30 20:27:36 +02001851 unsigned int mtu;
Jarod Wilson61e84622016-10-07 22:04:33 -04001852 unsigned int min_mtu;
1853 unsigned int max_mtu;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001854 unsigned short type;
1855 unsigned short hard_header_len;
Alexey Dobriyand92be7a2017-04-10 11:25:26 +03001856 unsigned char min_header_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857
Johannes Bergf5184d22008-05-12 20:48:31 -07001858 unsigned short needed_headroom;
1859 unsigned short needed_tailroom;
1860
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 /* Interface address info. */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001862 unsigned char perm_addr[MAX_ADDR_LEN];
1863 unsigned char addr_assign_type;
1864 unsigned char addr_len;
Sebastian Siewiora0a96632013-12-12 10:15:59 +01001865 unsigned short neigh_priv_len;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001866 unsigned short dev_id;
1867 unsigned short dev_port;
Jiri Pirkoccffad252009-05-22 23:22:17 +00001868 spinlock_t addr_list_lock;
Thomas Graf14ffbbb2015-04-10 15:52:38 +02001869 unsigned char name_assign_type;
1870 bool uc_promisc;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001871 struct netdev_hw_addr_list uc;
1872 struct netdev_hw_addr_list mc;
1873 struct netdev_hw_addr_list dev_addrs;
1874
Eric Dumazet4c3d5e72013-03-30 06:31:03 +00001875#ifdef CONFIG_SYSFS
1876 struct kset *queues_kset;
1877#endif
Wang Chen9d45abe2008-06-17 21:12:48 -07001878 unsigned int promiscuity;
1879 unsigned int allmulti;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05001882 /* Protocol-specific pointers */
Jesse Gross65ac6a52010-10-20 13:56:05 +00001883
Ben Hutchingsd11ead72011-11-25 14:40:26 +00001884#if IS_ENABLED(CONFIG_VLAN_8021Q)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001885 struct vlan_info __rcu *vlan_info;
Jesse Gross65ac6a52010-10-20 13:56:05 +00001886#endif
Ben Hutchings34a430d2011-11-25 14:38:38 +00001887#if IS_ENABLED(CONFIG_NET_DSA)
Vivien Didelot2f657a62017-09-29 17:19:20 -04001888 struct dsa_port *dsa_ptr;
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00001889#endif
Ying Xue37cb0622013-12-10 20:45:41 -08001890#if IS_ENABLED(CONFIG_TIPC)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001891 struct tipc_bearer __rcu *tipc_ptr;
Ying Xue37cb0622013-12-10 20:45:41 -08001892#endif
David Ahern89e58142018-02-13 08:52:02 -08001893#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001894 void *atalk_ptr;
David Ahern89e58142018-02-13 08:52:02 -08001895#endif
Karoly Kemeny536721b2014-07-30 20:27:36 +02001896 struct in_device __rcu *ip_ptr;
David Ahern330c7272018-02-13 08:52:00 -08001897#if IS_ENABLED(CONFIG_DECNET)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001898 struct dn_dev __rcu *dn_ptr;
David Ahern330c7272018-02-13 08:52:00 -08001899#endif
Karoly Kemeny536721b2014-07-30 20:27:36 +02001900 struct inet6_dev __rcu *ip6_ptr;
David Ahern19ff13f2018-02-13 08:52:01 -08001901#if IS_ENABLED(CONFIG_AX25)
Karoly Kemeny536721b2014-07-30 20:27:36 +02001902 void *ax25_ptr;
David Ahern19ff13f2018-02-13 08:52:01 -08001903#endif
Karoly Kemeny536721b2014-07-30 20:27:36 +02001904 struct wireless_dev *ieee80211_ptr;
Alexander Aring98a18b62014-11-02 06:44:54 +01001905 struct wpan_dev *ieee802154_ptr;
Robert Shearman03c57742015-04-22 11:14:37 +01001906#if IS_ENABLED(CONFIG_MPLS_ROUTING)
1907 struct mpls_dev __rcu *mpls_ptr;
1908#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001910/*
Eric Dumazetcd135392010-09-16 02:58:13 +00001911 * Cache lines mostly used on receive path (including eth_type_trans())
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001912 */
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001913 /* Interface address info used in eth_type_trans() */
Karoly Kemeny536721b2014-07-30 20:27:36 +02001914 unsigned char *dev_addr;
Jiri Pirkof001fde2009-05-05 02:48:28 +00001915
Tom Herbert0a9627f2010-03-16 08:03:29 +00001916 struct netdev_rx_queue *_rx;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001917 unsigned int num_rx_queues;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00001918 unsigned int real_num_rx_queues;
Tom Herbert0a9627f2010-03-16 08:03:29 +00001919
Eric Dumazet7acedaf2017-04-25 11:36:52 -07001920 struct bpf_prog __rcu *xdp_prog;
Eric Dumazet3b47d302014-11-06 21:09:44 -08001921 unsigned long gro_flush_timeout;
stephen hemminger61391cd2010-11-15 06:38:12 +00001922 rx_handler_func_t __rcu *rx_handler;
1923 void __rcu *rx_handler_data;
David S. Millere8a04642008-07-17 00:34:19 -07001924
Daniel Borkmann4cda01e2015-05-11 19:28:49 +02001925#ifdef CONFIG_NET_CLS_ACT
Jiri Pirko46209402017-11-03 11:46:25 +01001926 struct mini_Qdisc __rcu *miniq_ingress;
Daniel Borkmannd2788d32015-05-09 22:51:32 +02001927#endif
Eric Dumazet24824a02010-10-02 06:11:55 +00001928 struct netdev_queue __rcu *ingress_queue;
Pablo Neirae687ad62015-05-13 18:19:38 +02001929#ifdef CONFIG_NETFILTER_INGRESS
Aaron Conole960632e2017-08-24 00:08:32 +02001930 struct nf_hook_entries __rcu *nf_hooks_ingress;
Pablo Neirae687ad62015-05-13 18:19:38 +02001931#endif
Daniel Borkmannd2788d32015-05-09 22:51:32 +02001932
Karoly Kemeny536721b2014-07-30 20:27:36 +02001933 unsigned char broadcast[MAX_ADDR_LEN];
Thomas Graf14ffbbb2015-04-10 15:52:38 +02001934#ifdef CONFIG_RFS_ACCEL
1935 struct cpu_rmap *rx_cpu_rmap;
1936#endif
1937 struct hlist_node index_hlist;
Eric Dumazetcd135392010-09-16 02:58:13 +00001938
1939/*
1940 * Cache lines mostly used on transmit path
1941 */
David S. Millere8a04642008-07-17 00:34:19 -07001942 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1943 unsigned int num_tx_queues;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001944 unsigned int real_num_tx_queues;
Patrick McHardyaf356af2009-09-04 06:41:18 +00001945 struct Qdisc *qdisc;
Jiri Kosina59cc1f62016-08-10 11:05:15 +02001946#ifdef CONFIG_NET_SCHED
1947 DECLARE_HASHTABLE (qdisc_hash, 4);
1948#endif
Alexey Dobriyan0cd29502017-05-17 13:30:44 +03001949 unsigned int tx_queue_len;
David S. Millerc3f26a22008-07-31 16:58:50 -07001950 spinlock_t tx_global_lock;
Thomas Graf14ffbbb2015-04-10 15:52:38 +02001951 int watchdog_timeo;
Eric Dumazetcd135392010-09-16 02:58:13 +00001952
Tom Herbertbf264142010-11-26 08:36:09 +00001953#ifdef CONFIG_XPS
Amritha Nambiar80d19662018-06-29 21:26:41 -07001954 struct xps_dev_maps __rcu *xps_cpus_map;
1955 struct xps_dev_maps __rcu *xps_rxqs_map;
Tom Herbertbf264142010-11-26 08:36:09 +00001956#endif
Daniel Borkmann1f211a12016-01-07 22:29:47 +01001957#ifdef CONFIG_NET_CLS_ACT
Jiri Pirko46209402017-11-03 11:46:25 +01001958 struct mini_Qdisc __rcu *miniq_egress;
Daniel Borkmann1f211a12016-01-07 22:29:47 +01001959#endif
Scott Feldman0c4f6912015-07-18 18:24:48 -07001960
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001961 /* These may be needed for future network-power-down code. */
Eric Dumazet9356b8f2005-09-27 15:23:16 -07001962 struct timer_list watchdog_timer;
1963
Eric Dumazet29b44332010-10-11 10:22:12 +00001964 int __percpu *pcpu_refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 struct list_head todo_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966
Eric Dumazete014deb2009-11-17 05:59:21 +00001967 struct list_head link_watch_list;
Herbert Xu572a1032007-05-08 18:34:17 -07001968
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 enum { NETREG_UNINITIALIZED=0,
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07001970 NETREG_REGISTERED, /* completed register_netdevice */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 NETREG_UNREGISTERING, /* called unregister_netdevice */
1972 NETREG_UNREGISTERED, /* completed unregister todo */
1973 NETREG_RELEASED, /* called free_netdev */
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08001974 NETREG_DUMMY, /* dummy device for NAPI poll */
Eric Dumazet449f4542011-05-19 12:24:16 +00001975 } reg_state:8;
1976
Karoly Kemeny536721b2014-07-30 20:27:36 +02001977 bool dismantle;
Patrick McHardya2835762010-02-26 06:34:51 +00001978
1979 enum {
1980 RTNL_LINK_INITIALIZED,
1981 RTNL_LINK_INITIALIZING,
1982 } rtnl_link_state:16;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
David S. Millercf124db2017-05-08 12:52:56 -04001984 bool needs_free_netdev;
1985 void (*priv_destructor)(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987#ifdef CONFIG_NETPOLL
Cong Wang5fbee842013-01-22 21:29:39 +00001988 struct netpoll_info __rcu *npinfo;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989#endif
David S. Millereae792b2008-07-15 03:03:33 -07001990
Eric W. Biederman0c5c9fb2015-03-11 23:06:44 -05001991 possible_net_t nd_net;
Eric W. Biederman4a1c5372007-09-12 11:56:32 +02001992
David S. Miller49517042008-05-12 03:29:11 -07001993 /* mid-layer private */
Eric Dumazeta7855c72010-09-23 23:51:51 +00001994 union {
Karoly Kemeny536721b2014-07-30 20:27:36 +02001995 void *ml_priv;
1996 struct pcpu_lstats __percpu *lstats;
Li RongQing8f849852014-01-04 13:57:59 +08001997 struct pcpu_sw_netstats __percpu *tstats;
Karoly Kemeny536721b2014-07-30 20:27:36 +02001998 struct pcpu_dstats __percpu *dstats;
Eric Dumazeta7855c72010-09-23 23:51:51 +00001999 };
Karoly Kemeny536721b2014-07-30 20:27:36 +02002000
Tobias Klauserfb585b42017-02-10 16:43:50 +01002001#if IS_ENABLED(CONFIG_GARP)
Eric Dumazet3cc77ec2010-10-24 21:32:36 +00002002 struct garp_port __rcu *garp_port;
Tobias Klauserfb585b42017-02-10 16:43:50 +01002003#endif
2004#if IS_ENABLED(CONFIG_MRP)
David Wardfebf0182013-02-08 17:17:06 +00002005 struct mrp_port __rcu *mrp_port;
Tobias Klauserfb585b42017-02-10 16:43:50 +01002006#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002008 struct device dev;
Eric W. Biederman0c509a62009-10-29 14:18:21 +00002009 const struct attribute_group *sysfs_groups[4];
Michael Daltona953be52014-01-16 22:23:28 -08002010 const struct attribute_group *sysfs_rx_queue_group;
Patrick McHardy38f7b872007-06-13 12:03:51 -07002011
Patrick McHardy38f7b872007-06-13 12:03:51 -07002012 const struct rtnl_link_ops *rtnl_link_ops;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07002013
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07002014 /* for setting kernel sock attribute on TCP connection setup */
2015#define GSO_MAX_SIZE 65536
2016 unsigned int gso_max_size;
Ben Hutchings30b678d2012-07-30 15:57:00 +00002017#define GSO_MAX_SEGS 65535
2018 u16 gso_max_segs;
Eric Dumazet743b03a2016-04-09 11:29:58 -07002019
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08002020#ifdef CONFIG_DCB
Stephen Hemminger32953542009-10-05 06:01:03 +00002021 const struct dcbnl_rtnl_ops *dcbnl_ops;
Alexander Duyck2f90b862008-11-20 20:52:10 -08002022#endif
Alexander Duyckffcfe252018-07-09 12:19:38 -04002023 s16 num_tc;
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002024 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
2025 u8 prio_tc_map[TC_BITMASK + 1];
Alexander Duyck2f90b862008-11-20 20:52:10 -08002026
Ben Hutchingsd11ead72011-11-25 14:40:26 +00002027#if IS_ENABLED(CONFIG_FCOE)
Yi Zou4d288d52009-02-27 14:06:59 -08002028 unsigned int fcoe_ddp_xid;
2029#endif
Daniel Borkmann86f85152013-12-29 17:27:11 +01002030#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00002031 struct netprio_map __rcu *priomap;
2032#endif
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002033 struct phy_device *phydev;
Russell Kinge679c9c2018-03-28 15:44:16 -07002034 struct sfp_bus *sfp_bus;
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002035 struct lock_class_key *qdisc_tx_busylock;
Eric Dumazetf9eb8ae2016-06-06 09:37:15 -07002036 struct lock_class_key *qdisc_running_key;
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002037 bool proto_down;
Heiner Kallweit61941142018-09-24 21:58:59 +02002038 unsigned wol_enabled:1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039};
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07002040#define to_net_dev(d) container_of(d, struct net_device, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
David S. Millerb5cdae32017-04-18 15:36:58 -04002042static inline bool netif_elide_gro(const struct net_device *dev)
2043{
2044 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2045 return true;
2046 return false;
2047}
2048
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049#define NETDEV_ALIGN 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050
David S. Millere8a04642008-07-17 00:34:19 -07002051static inline
John Fastabend4f57c082011-01-17 08:06:04 +00002052int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2053{
2054 return dev->prio_tc_map[prio & TC_BITMASK];
2055}
2056
2057static inline
2058int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2059{
2060 if (tc >= dev->num_tc)
2061 return -EINVAL;
2062
2063 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2064 return 0;
2065}
2066
Alexander Duyck8d059b02016-10-28 11:43:49 -04002067int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
Alexander Duyck9cf1f6a2016-10-28 11:43:20 -04002068void netdev_reset_tc(struct net_device *dev);
2069int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2070int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
John Fastabend4f57c082011-01-17 08:06:04 +00002071
2072static inline
2073int netdev_get_num_tc(struct net_device *dev)
2074{
2075 return dev->num_tc;
2076}
2077
Alexander Duyckffcfe252018-07-09 12:19:38 -04002078void netdev_unbind_sb_channel(struct net_device *dev,
2079 struct net_device *sb_dev);
2080int netdev_bind_sb_channel_queue(struct net_device *dev,
2081 struct net_device *sb_dev,
2082 u8 tc, u16 count, u16 offset);
2083int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2084static inline int netdev_get_sb_channel(struct net_device *dev)
2085{
2086 return max_t(int, -dev->num_tc, 0);
2087}
2088
John Fastabend4f57c082011-01-17 08:06:04 +00002089static inline
David S. Millere8a04642008-07-17 00:34:19 -07002090struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2091 unsigned int index)
2092{
2093 return &dev->_tx[index];
2094}
2095
Daniel Borkmann10c51b56232014-08-27 11:11:27 +02002096static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2097 const struct sk_buff *skb)
2098{
2099 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2100}
2101
David S. Millere8a04642008-07-17 00:34:19 -07002102static inline void netdev_for_each_tx_queue(struct net_device *dev,
2103 void (*f)(struct net_device *,
2104 struct netdev_queue *,
2105 void *),
2106 void *arg)
2107{
2108 unsigned int i;
2109
2110 for (i = 0; i < dev->num_tx_queues; i++)
2111 f(dev, &dev->_tx[i], arg);
2112}
2113
Eric Dumazetd3fff6c2016-06-09 07:45:12 -07002114#define netdev_lockdep_set_classes(dev) \
2115{ \
2116 static struct lock_class_key qdisc_tx_busylock_key; \
2117 static struct lock_class_key qdisc_running_key; \
2118 static struct lock_class_key qdisc_xmit_lock_key; \
2119 static struct lock_class_key dev_addr_list_lock_key; \
2120 unsigned int i; \
2121 \
2122 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2123 (dev)->qdisc_running_key = &qdisc_running_key; \
2124 lockdep_set_class(&(dev)->addr_list_lock, \
2125 &dev_addr_list_lock_key); \
2126 for (i = 0; i < (dev)->num_tx_queues; i++) \
2127 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2128 &qdisc_xmit_lock_key); \
2129}
2130
Paolo Abenib71b5832019-03-20 11:02:05 +01002131u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2132 struct net_device *sb_dev);
Paolo Abeni4bd97d52019-03-20 11:02:04 +01002133struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2134 struct sk_buff *skb,
2135 struct net_device *sb_dev);
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002136
Paolo Abeni871b6422016-02-26 10:45:37 +01002137/* returns the headroom that the master device needs to take in account
2138 * when forwarding to this dev
2139 */
2140static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2141{
2142 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2143}
2144
2145static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2146{
2147 if (dev->netdev_ops->ndo_set_rx_headroom)
2148 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2149}
2150
2151/* set the device rx headroom to the dev's default */
2152static inline void netdev_reset_rx_headroom(struct net_device *dev)
2153{
2154 netdev_set_rx_headroom(dev, -1);
2155}
2156
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002157/*
2158 * Net namespace inlines
2159 */
2160static inline
2161struct net *dev_net(const struct net_device *dev)
2162{
Eric Dumazetc2d9ba92010-06-01 06:51:19 +00002163 return read_pnet(&dev->nd_net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002164}
2165
2166static inline
Denis V. Lunevf5aa23f2008-03-26 00:48:17 -07002167void dev_net_set(struct net_device *dev, struct net *net)
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002168{
Eric W. Biederman0c5c9fb2015-03-11 23:06:44 -05002169 write_pnet(&dev->nd_net, net);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002170}
2171
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002172/**
2173 * netdev_priv - access network device private data
2174 * @dev: network device
2175 *
2176 * Get network device private data
2177 */
Patrick McHardy6472ce62007-06-13 12:03:21 -07002178static inline void *netdev_priv(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179{
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00002180 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181}
2182
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183/* Set the sysfs physical device reference for the network logical device
2184 * if set prior to registration will cause a symlink during initialization.
2185 */
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07002186#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
Marcel Holtmann384912e2009-08-31 21:08:19 +00002188/* Set the sysfs device type for the network logical device to allow
Maxime Jayat3f794102013-10-12 01:29:46 +02002189 * fine-grained identification of different network device types. For
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002190 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
Marcel Holtmann384912e2009-08-31 21:08:19 +00002191 */
2192#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2193
Eric Dumazet82dc3c63c2013-03-05 15:57:22 +00002194/* Default NAPI poll() weight
2195 * Device drivers are strongly advised to not use bigger value
2196 */
2197#define NAPI_POLL_WEIGHT 64
2198
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002199/**
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002200 * netif_napi_add - initialize a NAPI context
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002201 * @dev: network device
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002202 * @napi: NAPI context
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002203 * @poll: polling function
2204 * @weight: default weight
2205 *
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002206 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2207 * *any* of the other NAPI-related functions.
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002208 */
Herbert Xud565b0a2008-12-15 23:38:52 -08002209void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2210 int (*poll)(struct napi_struct *, int), int weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002211
Alexander Duyckd8156532008-07-08 15:13:05 -07002212/**
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002213 * netif_tx_napi_add - initialize a NAPI context
Eric Dumazetd64b5e82015-11-18 06:31:00 -08002214 * @dev: network device
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002215 * @napi: NAPI context
Eric Dumazetd64b5e82015-11-18 06:31:00 -08002216 * @poll: polling function
2217 * @weight: default weight
2218 *
2219 * This variant of netif_napi_add() should be used from drivers using NAPI
2220 * to exclusively poll a TX queue.
2221 * This will avoid we add it into napi_hash[], thus polluting this hash table.
2222 */
2223static inline void netif_tx_napi_add(struct net_device *dev,
2224 struct napi_struct *napi,
2225 int (*poll)(struct napi_struct *, int),
2226 int weight)
2227{
2228 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2229 netif_napi_add(dev, napi, poll, weight);
2230}
2231
2232/**
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002233 * netif_napi_del - remove a NAPI context
2234 * @napi: NAPI context
Alexander Duyckd8156532008-07-08 15:13:05 -07002235 *
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002236 * netif_napi_del() removes a NAPI context from the network device NAPI list
Alexander Duyckd8156532008-07-08 15:13:05 -07002237 */
Herbert Xud565b0a2008-12-15 23:38:52 -08002238void netif_napi_del(struct napi_struct *napi);
2239
2240struct napi_gro_cb {
Herbert Xu78a478d2009-05-26 18:50:21 +00002241 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002242 void *frag0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002243
Herbert Xu74895942009-05-26 18:50:27 +00002244 /* Length of frag0. */
2245 unsigned int frag0_len;
2246
Herbert Xu86911732009-01-29 14:19:50 +00002247 /* This indicates where we are processing relative to skb->data. */
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002248 int data_offset;
Herbert Xu86911732009-01-29 14:19:50 +00002249
Herbert Xud565b0a2008-12-15 23:38:52 -08002250 /* This is non-zero if the packet cannot be merged with the new skb. */
Jerry Chubf5a7552014-01-07 10:23:19 -08002251 u16 flush;
2252
2253 /* Save the IP ID here and check when we get to the transport layer */
2254 u16 flush_id;
Herbert Xud565b0a2008-12-15 23:38:52 -08002255
2256 /* Number of segments aggregated. */
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00002257 u16 count;
2258
Tom Herbert15e23962015-02-10 16:30:31 -08002259 /* Start offset for remote checksum offload */
2260 u16 gro_remcsum_start;
2261
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00002262 /* jiffies when first packet was created/queued */
2263 unsigned long age;
Eric Dumazet86347242012-10-08 21:38:50 +02002264
Tom Herbertafe93322014-09-17 12:25:57 -07002265 /* Used in ipv6_gro_receive() and foo-over-udp */
Or Gerlitzb582ef02014-01-20 13:59:19 +02002266 u16 proto;
2267
Tom Herbertbaa32ff2015-02-10 16:30:30 -08002268 /* This is non-zero if the packet may be of the same flow. */
2269 u8 same_flow:1;
2270
Jesse Grossfac8e0f2016-03-19 09:32:01 -07002271 /* Used in tunnel GRO receive */
2272 u8 encap_mark:1;
Tom Herbert573e8fc2014-08-22 13:33:47 -07002273
2274 /* GRO checksum is valid */
2275 u8 csum_valid:1;
2276
Tom Herbert662880f2014-08-27 21:26:56 -07002277 /* Number of checksums via CHECKSUM_UNNECESSARY */
2278 u8 csum_cnt:3;
Eric Dumazetc3c7c252012-12-06 13:54:59 +00002279
Tom Herbertbaa32ff2015-02-10 16:30:30 -08002280 /* Free the skb? */
2281 u8 free:2;
2282#define NAPI_GRO_FREE 1
2283#define NAPI_GRO_FREE_STOLEN_HEAD 2
2284
Tom Herbertefc98d02014-10-03 15:48:08 -07002285 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2286 u8 is_ipv6:1;
2287
Alexander Duycka0ca1532016-04-05 09:13:39 -07002288 /* Used in GRE, set in fou/gue_gro_receive */
2289 u8 is_fou:1;
2290
Alexander Duyck15305452016-04-10 21:44:57 -04002291 /* Used to determine if flush_id can be ignored */
2292 u8 is_atomic:1;
2293
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02002294 /* Number of gro_receive callbacks this packet already went through */
2295 u8 recursion_counter:4;
2296
2297 /* 1 bit hole */
Tom Herbertbaa32ff2015-02-10 16:30:30 -08002298
Jerry Chubf5a7552014-01-07 10:23:19 -08002299 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2300 __wsum csum;
2301
Eric Dumazetc3c7c252012-12-06 13:54:59 +00002302 /* used in skb_gro_receive() slow path */
2303 struct sk_buff *last;
Herbert Xud565b0a2008-12-15 23:38:52 -08002304};
2305
2306#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
Alexander Duyckd8156532008-07-08 15:13:05 -07002307
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02002308#define GRO_RECURSION_LIMIT 15
2309static inline int gro_recursion_inc_test(struct sk_buff *skb)
2310{
2311 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2312}
2313
David Millerd4546c22018-06-24 14:13:49 +09002314typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
2315static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
2316 struct list_head *head,
2317 struct sk_buff *skb)
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02002318{
2319 if (unlikely(gro_recursion_inc_test(skb))) {
2320 NAPI_GRO_CB(skb)->flush |= 1;
2321 return NULL;
2322 }
2323
2324 return cb(head, skb);
2325}
2326
David Millerd4546c22018-06-24 14:13:49 +09002327typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
2328 struct sk_buff *);
2329static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
2330 struct sock *sk,
2331 struct list_head *head,
2332 struct sk_buff *skb)
Sabrina Dubrocafcd91dd2016-10-20 15:58:02 +02002333{
2334 if (unlikely(gro_recursion_inc_test(skb))) {
2335 NAPI_GRO_CB(skb)->flush |= 1;
2336 return NULL;
2337 }
2338
2339 return cb(sk, head, skb);
2340}
2341
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342struct packet_type {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002343 __be16 type; /* This is really htons(ether_type). */
Vincent Whitchurchfa788d92018-09-03 16:23:36 +02002344 bool ignore_outgoing;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002345 struct net_device *dev; /* NULL is wildcarded here */
2346 int (*func) (struct sk_buff *,
2347 struct net_device *,
2348 struct packet_type *,
2349 struct net_device *);
Edward Cree17266ee2018-07-02 16:14:12 +01002350 void (*list_func) (struct list_head *,
2351 struct packet_type *,
2352 struct net_device *);
Eric Leblondc0de08d2012-08-16 22:02:58 +00002353 bool (*id_match)(struct packet_type *ptype,
2354 struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355 void *af_packet_priv;
2356 struct list_head list;
2357};
2358
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002359struct offload_callbacks {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2361 netdev_features_t features);
David Millerd4546c22018-06-24 14:13:49 +09002362 struct sk_buff *(*gro_receive)(struct list_head *head,
2363 struct sk_buff *skb);
Jerry Chu299603e82013-12-11 20:53:45 -08002364 int (*gro_complete)(struct sk_buff *skb, int nhoff);
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002365};
2366
2367struct packet_offload {
2368 __be16 type; /* This is really htons(ether_type). */
David S. Millerbdef7de2015-06-01 14:56:09 -07002369 u16 priority;
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00002370 struct offload_callbacks callbacks;
2371 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372};
2373
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002374/* often modified stats are per-CPU, other are shared (netdev->stats) */
Li RongQing8f849852014-01-04 13:57:59 +08002375struct pcpu_sw_netstats {
2376 u64 rx_packets;
2377 u64 rx_bytes;
2378 u64 tx_packets;
2379 u64 tx_bytes;
2380 struct u64_stats_sync syncp;
Eric Dumazet9a5ee462018-11-16 07:24:24 -08002381} __aligned(4 * sizeof(u64));
Li RongQing8f849852014-01-04 13:57:59 +08002382
Li RongQing52bb6672018-09-14 16:00:51 +08002383struct pcpu_lstats {
2384 u64 packets;
2385 u64 bytes;
2386 struct u64_stats_sync syncp;
Eric Dumazet9a5ee462018-11-16 07:24:24 -08002387} __aligned(2 * sizeof(u64));
Li RongQing52bb6672018-09-14 16:00:51 +08002388
Pablo Neira Ayusoaabc92b2015-11-10 14:31:18 +01002389#define __netdev_alloc_pcpu_stats(type, gfp) \
2390({ \
2391 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2392 if (pcpu_stats) { \
2393 int __cpu; \
2394 for_each_possible_cpu(__cpu) { \
2395 typeof(type) *stat; \
2396 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2397 u64_stats_init(&stat->syncp); \
2398 } \
2399 } \
2400 pcpu_stats; \
WANG Cong1c213bd2014-02-13 11:46:28 -08002401})
2402
Pablo Neira Ayusoaabc92b2015-11-10 14:31:18 +01002403#define netdev_alloc_pcpu_stats(type) \
Felix Fietkau326fcfa2015-12-05 13:58:11 +01002404 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
Pablo Neira Ayusoaabc92b2015-11-10 14:31:18 +01002405
Jiri Pirko764f5e52015-12-03 12:12:12 +01002406enum netdev_lag_tx_type {
2407 NETDEV_LAG_TX_TYPE_UNKNOWN,
2408 NETDEV_LAG_TX_TYPE_RANDOM,
2409 NETDEV_LAG_TX_TYPE_BROADCAST,
2410 NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2411 NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2412 NETDEV_LAG_TX_TYPE_HASH,
2413};
2414
John Hurleyf44aa9e2018-05-23 19:22:52 -07002415enum netdev_lag_hash {
2416 NETDEV_LAG_HASH_NONE,
2417 NETDEV_LAG_HASH_L2,
2418 NETDEV_LAG_HASH_L34,
2419 NETDEV_LAG_HASH_L23,
2420 NETDEV_LAG_HASH_E23,
2421 NETDEV_LAG_HASH_E34,
2422 NETDEV_LAG_HASH_UNKNOWN,
2423};
2424
Jiri Pirko764f5e52015-12-03 12:12:12 +01002425struct netdev_lag_upper_info {
2426 enum netdev_lag_tx_type tx_type;
John Hurleyf44aa9e2018-05-23 19:22:52 -07002427 enum netdev_lag_hash hash_type;
Jiri Pirko764f5e52015-12-03 12:12:12 +01002428};
2429
Jiri Pirkofb1b2e32015-12-03 12:12:16 +01002430struct netdev_lag_lower_state_info {
2431 u8 link_up : 1,
2432 tx_enabled : 1;
2433};
2434
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435#include <linux/notifier.h>
2436
Kirill Tkhaiede27622018-03-23 19:47:19 +03002437/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
2438 * and the rtnetlink notification exclusion list in rtnetlink_event() when
2439 * adding new types.
Amerigo Wangdcfe1422011-07-25 17:13:09 -07002440 */
Kirill Tkhaiede27622018-03-23 19:47:19 +03002441enum netdev_cmd {
2442 NETDEV_UP = 1, /* For now you can't veto a device up/down */
2443 NETDEV_DOWN,
2444 NETDEV_REBOOT, /* Tell a protocol stack a network interface
Amerigo Wangdcfe1422011-07-25 17:13:09 -07002445 detected a hardware crash and restarted
2446 - we can use this eg to kick tcp sessions
2447 once done */
Kirill Tkhaiede27622018-03-23 19:47:19 +03002448 NETDEV_CHANGE, /* Notify device state change */
2449 NETDEV_REGISTER,
2450 NETDEV_UNREGISTER,
2451 NETDEV_CHANGEMTU, /* notify after mtu change happened */
Petr Machata15704152018-12-13 11:54:33 +00002452 NETDEV_CHANGEADDR, /* notify after the address change */
2453 NETDEV_PRE_CHANGEADDR, /* notify before the address change */
Kirill Tkhaiede27622018-03-23 19:47:19 +03002454 NETDEV_GOING_DOWN,
2455 NETDEV_CHANGENAME,
2456 NETDEV_FEAT_CHANGE,
2457 NETDEV_BONDING_FAILOVER,
2458 NETDEV_PRE_UP,
2459 NETDEV_PRE_TYPE_CHANGE,
2460 NETDEV_POST_TYPE_CHANGE,
2461 NETDEV_POST_INIT,
Kirill Tkhaiede27622018-03-23 19:47:19 +03002462 NETDEV_RELEASE,
2463 NETDEV_NOTIFY_PEERS,
2464 NETDEV_JOIN,
2465 NETDEV_CHANGEUPPER,
2466 NETDEV_RESEND_IGMP,
2467 NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
2468 NETDEV_CHANGEINFODATA,
2469 NETDEV_BONDING_INFO,
2470 NETDEV_PRECHANGEUPPER,
2471 NETDEV_CHANGELOWERSTATE,
2472 NETDEV_UDP_TUNNEL_PUSH_INFO,
2473 NETDEV_UDP_TUNNEL_DROP_INFO,
2474 NETDEV_CHANGE_TX_QUEUE_LEN,
Gal Pressman9daae9b2018-03-28 17:46:54 +03002475 NETDEV_CVLAN_FILTER_PUSH_INFO,
2476 NETDEV_CVLAN_FILTER_DROP_INFO,
2477 NETDEV_SVLAN_FILTER_PUSH_INFO,
2478 NETDEV_SVLAN_FILTER_DROP_INFO,
Kirill Tkhaiede27622018-03-23 19:47:19 +03002479};
2480const char *netdev_cmd_to_name(enum netdev_cmd cmd);
Amerigo Wangdcfe1422011-07-25 17:13:09 -07002481
Joe Perchesf629d202013-09-26 14:48:15 -07002482int register_netdevice_notifier(struct notifier_block *nb);
2483int unregister_netdevice_notifier(struct notifier_block *nb);
Jiri Pirko351638e2013-05-28 01:30:21 +00002484
2485struct netdev_notifier_info {
David Ahern51d0c0472017-10-04 17:48:45 -07002486 struct net_device *dev;
2487 struct netlink_ext_ack *extack;
Jiri Pirko351638e2013-05-28 01:30:21 +00002488};
2489
Sabrina Dubrocaaf7d6cc2018-10-09 17:48:14 +02002490struct netdev_notifier_info_ext {
2491 struct netdev_notifier_info info; /* must be first */
2492 union {
2493 u32 mtu;
2494 } ext;
2495};
2496
Jiri Pirkobe9efd32013-05-28 01:30:22 +00002497struct netdev_notifier_change_info {
2498 struct netdev_notifier_info info; /* must be first */
2499 unsigned int flags_changed;
2500};
2501
Jiri Pirko0e4ead92015-08-27 09:31:18 +02002502struct netdev_notifier_changeupper_info {
2503 struct netdev_notifier_info info; /* must be first */
2504 struct net_device *upper_dev; /* new upper dev */
2505 bool master; /* is upper dev master */
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002506 bool linking; /* is the notification for link or unlink */
Jiri Pirko29bf24a2015-12-03 12:12:11 +01002507 void *upper_info; /* upper dev info */
Jiri Pirko0e4ead92015-08-27 09:31:18 +02002508};
2509
Jiri Pirko04d48262015-12-03 12:12:15 +01002510struct netdev_notifier_changelowerstate_info {
2511 struct netdev_notifier_info info; /* must be first */
2512 void *lower_state_info; /* is lower dev state */
2513};
2514
Petr Machata15704152018-12-13 11:54:33 +00002515struct netdev_notifier_pre_changeaddr_info {
2516 struct netdev_notifier_info info; /* must be first */
2517 const unsigned char *dev_addr;
2518};
2519
Cong Wang75538c22013-05-29 11:30:50 +08002520static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2521 struct net_device *dev)
2522{
2523 info->dev = dev;
David Ahern51d0c0472017-10-04 17:48:45 -07002524 info->extack = NULL;
Cong Wang75538c22013-05-29 11:30:50 +08002525}
2526
Jiri Pirko351638e2013-05-28 01:30:21 +00002527static inline struct net_device *
2528netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2529{
2530 return info->dev;
2531}
2532
David Ahern51d0c0472017-10-04 17:48:45 -07002533static inline struct netlink_ext_ack *
2534netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
2535{
2536 return info->extack;
2537}
2538
Joe Perchesf629d202013-09-26 14:48:15 -07002539int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
Amerigo Wangdcfe1422011-07-25 17:13:09 -07002540
2541
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542extern rwlock_t dev_base_lock; /* Device list lock */
2543
Eric W. Biederman881d9662007-09-17 11:56:21 -07002544#define for_each_netdev(net, d) \
2545 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
Eric W. Biedermandcbccbd42009-11-29 22:25:26 +00002546#define for_each_netdev_reverse(net, d) \
2547 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08002548#define for_each_netdev_rcu(net, d) \
2549 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002550#define for_each_netdev_safe(net, d, n) \
2551 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2552#define for_each_netdev_continue(net, d) \
2553 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
stephen hemminger254245d2009-11-10 07:54:47 +00002554#define for_each_netdev_continue_rcu(net, d) \
2555 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
nikolay@redhat.com8a7fbfa2013-03-12 02:49:01 +00002556#define for_each_netdev_in_bond_rcu(bond, slave) \
2557 for_each_netdev_rcu(&init_net, slave) \
Benjamin Poirier4ccce022015-01-14 16:52:35 +09002558 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
Pavel Emelianov7562f872007-05-03 15:13:45 -07002559#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
2560
Daniel Lezcanoa050c332007-09-12 14:57:09 +02002561static inline struct net_device *next_net_device(struct net_device *dev)
2562{
2563 struct list_head *lh;
2564 struct net *net;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002565
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002566 net = dev_net(dev);
Daniel Lezcanoa050c332007-09-12 14:57:09 +02002567 lh = dev->dev_list.next;
2568 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2569}
2570
Eric Dumazetce81b762009-11-11 17:34:30 +00002571static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2572{
2573 struct list_head *lh;
2574 struct net *net;
2575
2576 net = dev_net(dev);
Eric Dumazetccf43432011-01-26 18:08:02 +00002577 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
Eric Dumazetce81b762009-11-11 17:34:30 +00002578 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2579}
2580
Daniel Lezcanoa050c332007-09-12 14:57:09 +02002581static inline struct net_device *first_net_device(struct net *net)
2582{
2583 return list_empty(&net->dev_base_head) ? NULL :
2584 net_device_entry(net->dev_base_head.next);
2585}
Pavel Emelianov7562f872007-05-03 15:13:45 -07002586
Eric Dumazetccf43432011-01-26 18:08:02 +00002587static inline struct net_device *first_net_device_rcu(struct net *net)
2588{
2589 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2590
2591 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2592}
2593
Joe Perchesf629d202013-09-26 14:48:15 -07002594int netdev_boot_setup_check(struct net_device *dev);
2595unsigned long netdev_boot_base(const char *prefix, int unit);
2596struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2597 const char *hwaddr);
2598struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2599struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2600void dev_add_pack(struct packet_type *pt);
2601void dev_remove_pack(struct packet_type *pt);
2602void __dev_remove_pack(struct packet_type *pt);
2603void dev_add_offload(struct packet_offload *po);
2604void dev_remove_offload(struct packet_offload *po);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605
Nicolas Dichtela54acb32015-04-02 17:07:00 +02002606int dev_get_iflink(const struct net_device *dev);
Pravin B Shelarfc4099f2015-10-22 18:17:16 -07002607int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
WANG Cong6c555492014-09-11 15:35:09 -07002608struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2609 unsigned short mask);
Joe Perchesf629d202013-09-26 14:48:15 -07002610struct net_device *dev_get_by_name(struct net *net, const char *name);
2611struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2612struct net_device *__dev_get_by_name(struct net *net, const char *name);
2613int dev_alloc_name(struct net_device *dev, const char *name);
Petr Machata00f54e62018-12-06 17:05:36 +00002614int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
stephen hemminger7051b882017-07-18 15:59:27 -07002615void dev_close(struct net_device *dev);
2616void dev_close_many(struct list_head *head, bool unlink);
Joe Perchesf629d202013-09-26 14:48:15 -07002617void dev_disable_lro(struct net_device *dev);
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -05002618int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
Alexander Duycka4ea8a32018-07-09 12:19:54 -04002619u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
Paolo Abenia350ecc2019-03-20 11:02:06 +01002620 struct net_device *sb_dev);
Alexander Duycka4ea8a32018-07-09 12:19:54 -04002621u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
Paolo Abenia350ecc2019-03-20 11:02:06 +01002622 struct net_device *sb_dev);
Eric W. Biederman2b4aa3c2015-09-15 20:04:07 -05002623int dev_queue_xmit(struct sk_buff *skb);
Alexander Duyckeadec8772018-07-09 12:19:48 -04002624int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
Magnus Karlsson865b03f2018-05-02 13:01:33 +02002625int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
Joe Perchesf629d202013-09-26 14:48:15 -07002626int register_netdevice(struct net_device *dev);
2627void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2628void unregister_netdevice_many(struct list_head *head);
Eric Dumazet44a08732009-10-27 07:03:04 +00002629static inline void unregister_netdevice(struct net_device *dev)
2630{
2631 unregister_netdevice_queue(dev, NULL);
2632}
2633
Joe Perchesf629d202013-09-26 14:48:15 -07002634int netdev_refcnt_read(const struct net_device *dev);
2635void free_netdev(struct net_device *dev);
Eric Dumazet74d332c2013-10-30 13:10:44 -07002636void netdev_freemem(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07002637void synchronize_net(void);
2638int init_dummy_netdev(struct net_device *dev);
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08002639
Joe Perchesf629d202013-09-26 14:48:15 -07002640struct net_device *dev_get_by_index(struct net *net, int ifindex);
2641struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2642struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
Miroslav Lichvar90b602f2017-05-19 17:52:37 +02002643struct net_device *dev_get_by_napi_id(unsigned int napi_id);
Joe Perchesf629d202013-09-26 14:48:15 -07002644int netdev_get_name(struct net *net, char *name, int ifindex);
2645int dev_restart(struct net_device *dev);
David Millerd4546c22018-06-24 14:13:49 +09002646int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
Herbert Xu86911732009-01-29 14:19:50 +00002647
2648static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2649{
2650 return NAPI_GRO_CB(skb)->data_offset;
2651}
2652
2653static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2654{
2655 return skb->len - NAPI_GRO_CB(skb)->data_offset;
2656}
2657
2658static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2659{
2660 NAPI_GRO_CB(skb)->data_offset += len;
2661}
2662
Herbert Xua5b1cf22009-05-26 18:50:28 +00002663static inline void *skb_gro_header_fast(struct sk_buff *skb,
2664 unsigned int offset)
Herbert Xu86911732009-01-29 14:19:50 +00002665{
Herbert Xu78a478d2009-05-26 18:50:21 +00002666 return NAPI_GRO_CB(skb)->frag0 + offset;
Herbert Xu86911732009-01-29 14:19:50 +00002667}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668
Herbert Xua5b1cf22009-05-26 18:50:28 +00002669static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2670{
2671 return NAPI_GRO_CB(skb)->frag0_len < hlen;
2672}
2673
Herbert Xu57ea52a2017-01-10 12:24:15 -08002674static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
2675{
2676 NAPI_GRO_CB(skb)->frag0 = NULL;
2677 NAPI_GRO_CB(skb)->frag0_len = 0;
2678}
2679
Herbert Xua5b1cf22009-05-26 18:50:28 +00002680static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2681 unsigned int offset)
2682{
Herbert Xu17dd7592011-07-27 06:16:28 -07002683 if (!pskb_may_pull(skb, hlen))
2684 return NULL;
2685
Herbert Xu57ea52a2017-01-10 12:24:15 -08002686 skb_gro_frag0_invalidate(skb);
Herbert Xu17dd7592011-07-27 06:16:28 -07002687 return skb->data + offset;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002688}
2689
Herbert Xu36e7b1b2009-04-27 05:44:45 -07002690static inline void *skb_gro_network_header(struct sk_buff *skb)
2691{
Herbert Xu78d3fd02009-05-26 18:50:23 +00002692 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2693 skb_network_offset(skb);
Herbert Xu36e7b1b2009-04-27 05:44:45 -07002694}
2695
Jerry Chubf5a7552014-01-07 10:23:19 -08002696static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2697 const void *start, unsigned int len)
2698{
Tom Herbert573e8fc2014-08-22 13:33:47 -07002699 if (NAPI_GRO_CB(skb)->csum_valid)
Jerry Chubf5a7552014-01-07 10:23:19 -08002700 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2701 csum_partial(start, len, 0));
2702}
2703
Tom Herbert573e8fc2014-08-22 13:33:47 -07002704/* GRO checksum functions. These are logical equivalents of the normal
2705 * checksum functions (in skbuff.h) except that they operate on the GRO
2706 * offsets and fields in sk_buff.
2707 */
2708
2709__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2710
Tom Herbert15e23962015-02-10 16:30:31 -08002711static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2712{
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002713 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
Tom Herbert15e23962015-02-10 16:30:31 -08002714}
2715
Tom Herbert573e8fc2014-08-22 13:33:47 -07002716static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2717 bool zero_okay,
2718 __sum16 check)
2719{
Tom Herbert6edec0e2015-02-10 16:30:28 -08002720 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
2721 skb_checksum_start_offset(skb) <
2722 skb_gro_offset(skb)) &&
Tom Herbert15e23962015-02-10 16:30:31 -08002723 !skb_at_gro_remcsum_start(skb) &&
Tom Herbert662880f2014-08-27 21:26:56 -07002724 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
Tom Herbert573e8fc2014-08-22 13:33:47 -07002725 (!zero_okay || check));
2726}
2727
2728static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2729 __wsum psum)
2730{
2731 if (NAPI_GRO_CB(skb)->csum_valid &&
2732 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2733 return 0;
2734
2735 NAPI_GRO_CB(skb)->csum = psum;
2736
2737 return __skb_gro_checksum_complete(skb);
2738}
2739
Tom Herbert573e8fc2014-08-22 13:33:47 -07002740static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2741{
Tom Herbert662880f2014-08-27 21:26:56 -07002742 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2743 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2744 NAPI_GRO_CB(skb)->csum_cnt--;
2745 } else {
2746 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2747 * verified a new top level checksum or an encapsulated one
2748 * during GRO. This saves work if we fallback to normal path.
2749 */
2750 __skb_incr_checksum_unnecessary(skb);
Tom Herbert573e8fc2014-08-22 13:33:47 -07002751 }
2752}
2753
2754#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2755 compute_pseudo) \
2756({ \
2757 __sum16 __ret = 0; \
2758 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2759 __ret = __skb_gro_checksum_validate_complete(skb, \
2760 compute_pseudo(skb, proto)); \
Davide Caratti219f1d792017-05-18 15:44:39 +02002761 if (!__ret) \
Tom Herbert573e8fc2014-08-22 13:33:47 -07002762 skb_gro_incr_csum_unnecessary(skb); \
2763 __ret; \
2764})
2765
2766#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2767 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2768
2769#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2770 compute_pseudo) \
2771 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2772
2773#define skb_gro_checksum_simple_validate(skb) \
2774 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2775
Tom Herbertd96535a2014-08-31 15:12:42 -07002776static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2777{
2778 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2779 !NAPI_GRO_CB(skb)->csum_valid);
2780}
2781
2782static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2783 __sum16 check, __wsum pseudo)
2784{
2785 NAPI_GRO_CB(skb)->csum = ~pseudo;
2786 NAPI_GRO_CB(skb)->csum_valid = 1;
2787}
2788
2789#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2790do { \
2791 if (__skb_gro_checksum_convert_check(skb)) \
2792 __skb_gro_checksum_convert(skb, check, \
2793 compute_pseudo(skb, proto)); \
2794} while (0)
2795
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002796struct gro_remcsum {
2797 int offset;
2798 __wsum delta;
2799};
2800
2801static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2802{
Geert Uytterhoeven846cd662015-02-18 11:38:06 +01002803 grc->offset = 0;
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002804 grc->delta = 0;
2805}
2806
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002807static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2808 unsigned int off, size_t hdrlen,
2809 int start, int offset,
2810 struct gro_remcsum *grc,
2811 bool nopartial)
Tom Herbertdcdc8992015-02-02 16:07:34 -08002812{
2813 __wsum delta;
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002814 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
Tom Herbertdcdc8992015-02-02 16:07:34 -08002815
2816 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2817
Tom Herbert15e23962015-02-10 16:30:31 -08002818 if (!nopartial) {
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002819 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2820 return ptr;
Tom Herbert15e23962015-02-10 16:30:31 -08002821 }
2822
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002823 ptr = skb_gro_header_fast(skb, off);
2824 if (skb_gro_header_hard(skb, off + plen)) {
2825 ptr = skb_gro_header_slow(skb, off + plen, off);
2826 if (!ptr)
2827 return NULL;
2828 }
2829
2830 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2831 start, offset);
Tom Herbertdcdc8992015-02-02 16:07:34 -08002832
2833 /* Adjust skb->csum since we changed the packet */
Tom Herbertdcdc8992015-02-02 16:07:34 -08002834 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002835
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002836 grc->offset = off + hdrlen + offset;
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002837 grc->delta = delta;
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002838
2839 return ptr;
Tom Herbertdcdc8992015-02-02 16:07:34 -08002840}
2841
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002842static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2843 struct gro_remcsum *grc)
2844{
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002845 void *ptr;
2846 size_t plen = grc->offset + sizeof(u16);
2847
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002848 if (!grc->delta)
2849 return;
2850
Tom Herbertb7fe10e2015-08-19 17:07:32 -07002851 ptr = skb_gro_header_fast(skb, grc->offset);
2852 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2853 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2854 if (!ptr)
2855 return;
2856 }
2857
2858 remcsum_unadjust((__sum16 *)ptr, grc->delta);
Tom Herbert26c4f7d2015-02-10 16:30:27 -08002859}
Tom Herbertdcdc8992015-02-02 16:07:34 -08002860
Steffen Klassert25393d32017-02-15 09:39:44 +01002861#ifdef CONFIG_XFRM_OFFLOAD
David Millerd4546c22018-06-24 14:13:49 +09002862static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
Steffen Klassert25393d32017-02-15 09:39:44 +01002863{
2864 if (PTR_ERR(pp) != -EINPROGRESS)
2865 NAPI_GRO_CB(skb)->flush |= flush;
2866}
Sabrina Dubroca603d4cf2018-06-30 17:38:55 +02002867static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
David S. Miller5cd3da42018-07-03 10:26:50 +09002868 struct sk_buff *pp,
Sabrina Dubroca603d4cf2018-06-30 17:38:55 +02002869 int flush,
2870 struct gro_remcsum *grc)
2871{
2872 if (PTR_ERR(pp) != -EINPROGRESS) {
2873 NAPI_GRO_CB(skb)->flush |= flush;
2874 skb_gro_remcsum_cleanup(skb, grc);
2875 skb->remcsum_offload = 0;
2876 }
2877}
Steffen Klassert25393d32017-02-15 09:39:44 +01002878#else
David Millerd4546c22018-06-24 14:13:49 +09002879static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
Steffen Klassert5f114162017-02-15 09:39:39 +01002880{
2881 NAPI_GRO_CB(skb)->flush |= flush;
2882}
Sabrina Dubroca603d4cf2018-06-30 17:38:55 +02002883static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
David S. Miller5cd3da42018-07-03 10:26:50 +09002884 struct sk_buff *pp,
Sabrina Dubroca603d4cf2018-06-30 17:38:55 +02002885 int flush,
2886 struct gro_remcsum *grc)
2887{
2888 NAPI_GRO_CB(skb)->flush |= flush;
2889 skb_gro_remcsum_cleanup(skb, grc);
2890 skb->remcsum_offload = 0;
2891}
Steffen Klassert25393d32017-02-15 09:39:44 +01002892#endif
Steffen Klassert5f114162017-02-15 09:39:39 +01002893
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002894static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2895 unsigned short type,
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07002896 const void *daddr, const void *saddr,
Eric Dumazet95c96172012-04-15 05:58:06 +00002897 unsigned int len)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002898{
Ursula Braunf1ecfd52007-10-22 16:16:14 +02002899 if (!dev->header_ops || !dev->header_ops->create)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002900 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07002901
2902 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002903}
2904
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07002905static inline int dev_parse_header(const struct sk_buff *skb,
2906 unsigned char *haddr)
2907{
2908 const struct net_device *dev = skb->dev;
2909
Patrick McHardy1b833362007-10-18 05:09:28 -07002910 if (!dev->header_ops || !dev->header_ops->parse)
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07002911 return 0;
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07002912 return dev->header_ops->parse(skb, haddr);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07002913}
2914
Maxim Mikityanskiye78b2912019-02-21 12:39:58 +00002915static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
2916{
2917 const struct net_device *dev = skb->dev;
2918
2919 if (!dev->header_ops || !dev->header_ops->parse_protocol)
2920 return 0;
2921 return dev->header_ops->parse_protocol(skb);
2922}
2923
Willem de Bruijn2793a232016-03-09 21:58:32 -05002924/* ll_header must have at least hard_header_len allocated */
2925static inline bool dev_validate_header(const struct net_device *dev,
2926 char *ll_header, int len)
2927{
2928 if (likely(len >= dev->hard_header_len))
2929 return true;
Willem de Bruijn217e6fa2017-02-07 15:57:20 -05002930 if (len < dev->min_header_len)
2931 return false;
Willem de Bruijn2793a232016-03-09 21:58:32 -05002932
2933 if (capable(CAP_SYS_RAWIO)) {
2934 memset(ll_header + len, 0, dev->hard_header_len - len);
2935 return true;
2936 }
2937
2938 if (dev->header_ops && dev->header_ops->validate)
2939 return dev->header_ops->validate(ll_header, len);
2940
2941 return false;
2942}
2943
Al Viro36fd6332017-06-26 13:19:16 -04002944typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
2945 int len, int size);
Joe Perchesf629d202013-09-26 14:48:15 -07002946int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947static inline int unregister_gifconf(unsigned int family)
2948{
2949 return register_gifconf(family, NULL);
2950}
2951
Willem de Bruijn99bbc702013-05-20 04:02:32 +00002952#ifdef CONFIG_NET_FLOW_LIMIT
Willem de Bruijn5f121b92013-06-13 15:29:38 -04002953#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
Willem de Bruijn99bbc702013-05-20 04:02:32 +00002954struct sd_flow_limit {
2955 u64 count;
2956 unsigned int num_buckets;
2957 unsigned int history_head;
2958 u16 history[FLOW_LIMIT_HISTORY];
2959 u8 buckets[];
2960};
2961
2962extern int netdev_flow_limit_table_len;
2963#endif /* CONFIG_NET_FLOW_LIMIT */
2964
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965/*
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05002966 * Incoming packets are placed on per-CPU queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 */
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08002968struct softnet_data {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 struct list_head poll_list;
Changli Gao6e7676c2010-04-27 15:07:33 -07002970 struct sk_buff_head process_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971
Changli Gaodee42872010-05-02 05:42:16 +00002972 /* stats */
David S. Millercd7b5392010-05-02 22:27:59 -07002973 unsigned int processed;
2974 unsigned int time_squeeze;
David S. Millercd7b5392010-05-02 22:27:59 -07002975 unsigned int received_rps;
Changli Gaofd793d82010-04-15 00:16:59 -07002976#ifdef CONFIG_RPS
Eric Dumazet88751272010-04-19 05:07:33 +00002977 struct softnet_data *rps_ipi_list;
Eric Dumazet4cdb1e22014-11-02 06:00:12 -08002978#endif
2979#ifdef CONFIG_NET_FLOW_LIMIT
2980 struct sd_flow_limit __rcu *flow_limit;
2981#endif
2982 struct Qdisc *output_queue;
2983 struct Qdisc **output_queue_tailp;
2984 struct sk_buff *completion_queue;
Steffen Klassertf53c7232017-12-20 10:41:36 +01002985#ifdef CONFIG_XFRM_OFFLOAD
2986 struct sk_buff_head xfrm_backlog;
2987#endif
Florian Westphal97cdcf32019-04-01 16:42:13 +02002988 /* written and read only by owning cpu: */
2989 struct {
2990 u16 recursion;
2991 u8 more;
2992 } xmit;
Eric Dumazet4cdb1e22014-11-02 06:00:12 -08002993#ifdef CONFIG_RPS
Eric Dumazet501e7ef2016-04-26 15:30:07 -07002994 /* input_queue_head should be written by cpu owning this struct,
2995 * and only read by other cpus. Worth using a cache line.
2996 */
2997 unsigned int input_queue_head ____cacheline_aligned_in_smp;
2998
2999 /* Elements below can be accessed between CPUs for RPS/RFS */
Ying Huang966a9672017-08-08 12:30:00 +08003000 call_single_data_t csd ____cacheline_aligned_in_smp;
Eric Dumazet88751272010-04-19 05:07:33 +00003001 struct softnet_data *rps_ipi_next;
3002 unsigned int cpu;
Tom Herbert76cc8b12010-05-20 18:37:59 +00003003 unsigned int input_queue_tail;
Tom Herbert1e94d722010-03-18 17:45:44 -07003004#endif
Eric Dumazet95c96172012-04-15 05:58:06 +00003005 unsigned int dropped;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003006 struct sk_buff_head input_pkt_queue;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003007 struct napi_struct backlog;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003008
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009};
3010
Tom Herbert76cc8b12010-05-20 18:37:59 +00003011static inline void input_queue_head_incr(struct softnet_data *sd)
Tom Herbertfec5e652010-04-16 16:01:27 -07003012{
3013#ifdef CONFIG_RPS
Tom Herbert76cc8b12010-05-20 18:37:59 +00003014 sd->input_queue_head++;
3015#endif
3016}
3017
3018static inline void input_queue_tail_incr_save(struct softnet_data *sd,
3019 unsigned int *qtail)
3020{
3021#ifdef CONFIG_RPS
3022 *qtail = ++sd->input_queue_tail;
Tom Herbertfec5e652010-04-16 16:01:27 -07003023#endif
3024}
3025
Tom Herbert0a9627f2010-03-16 08:03:29 +00003026DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027
Florian Westphal97cdcf32019-04-01 16:42:13 +02003028static inline int dev_recursion_level(void)
3029{
Florian Westphal28b05b92019-04-03 08:28:35 +02003030 return this_cpu_read(softnet_data.xmit.recursion);
Florian Westphal97cdcf32019-04-01 16:42:13 +02003031}
3032
3033#define XMIT_RECURSION_LIMIT 10
3034static inline bool dev_xmit_recursion(void)
3035{
3036 return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
3037 XMIT_RECURSION_LIMIT);
3038}
3039
3040static inline void dev_xmit_recursion_inc(void)
3041{
3042 __this_cpu_inc(softnet_data.xmit.recursion);
3043}
3044
3045static inline void dev_xmit_recursion_dec(void)
3046{
3047 __this_cpu_dec(softnet_data.xmit.recursion);
3048}
3049
Joe Perchesf629d202013-09-26 14:48:15 -07003050void __netif_schedule(struct Qdisc *q);
John Fastabend46e5da40a2014-09-12 20:04:52 -07003051void netif_schedule_queue(struct netdev_queue *txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003053static inline void netif_tx_schedule_all(struct net_device *dev)
3054{
3055 unsigned int i;
3056
3057 for (i = 0; i < dev->num_tx_queues; i++)
3058 netif_schedule_queue(netdev_get_tx_queue(dev, i));
3059}
3060
Denys Vlasenkof9a7cbb2016-04-08 17:51:54 +02003061static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
Dave Jonesd29f7492008-07-22 14:09:06 -07003062{
Tom Herbert734664982011-11-28 16:32:44 +00003063 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07003064}
3065
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003066/**
3067 * netif_start_queue - allow transmit
3068 * @dev: network device
3069 *
3070 * Allow upper layers to call the device hard_start_xmit routine.
3071 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072static inline void netif_start_queue(struct net_device *dev)
3073{
David S. Millere8a04642008-07-17 00:34:19 -07003074 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075}
3076
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003077static inline void netif_tx_start_all_queues(struct net_device *dev)
3078{
3079 unsigned int i;
3080
3081 for (i = 0; i < dev->num_tx_queues; i++) {
3082 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3083 netif_tx_start_queue(txq);
3084 }
3085}
3086
John Fastabend46e5da40a2014-09-12 20:04:52 -07003087void netif_tx_wake_queue(struct netdev_queue *dev_queue);
David S. Miller79d16382008-07-08 23:14:46 -07003088
Dave Jonesd29f7492008-07-22 14:09:06 -07003089/**
3090 * netif_wake_queue - restart transmit
3091 * @dev: network device
3092 *
3093 * Allow upper layers to call the device hard_start_xmit routine.
3094 * Used for flow control when transmit resources are available.
3095 */
David S. Miller79d16382008-07-08 23:14:46 -07003096static inline void netif_wake_queue(struct net_device *dev)
3097{
David S. Millere8a04642008-07-17 00:34:19 -07003098 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099}
3100
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003101static inline void netif_tx_wake_all_queues(struct net_device *dev)
3102{
3103 unsigned int i;
3104
3105 for (i = 0; i < dev->num_tx_queues; i++) {
3106 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3107 netif_tx_wake_queue(txq);
3108 }
3109}
3110
Denys Vlasenkof9a7cbb2016-04-08 17:51:54 +02003111static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
Dave Jonesd29f7492008-07-22 14:09:06 -07003112{
Tom Herbert734664982011-11-28 16:32:44 +00003113 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07003114}
3115
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003116/**
3117 * netif_stop_queue - stop transmitted packets
3118 * @dev: network device
3119 *
3120 * Stop upper layers calling the device hard_start_xmit routine.
3121 * Used for flow control when transmit resources are unavailable.
3122 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003123static inline void netif_stop_queue(struct net_device *dev)
3124{
David S. Millere8a04642008-07-17 00:34:19 -07003125 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126}
3127
Denys Vlasenkoa2029242015-05-11 21:17:53 +02003128void netif_tx_stop_all_queues(struct net_device *dev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003129
David S. Miller4d295152012-03-07 21:02:35 -05003130static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
Dave Jonesd29f7492008-07-22 14:09:06 -07003131{
Tom Herbert734664982011-11-28 16:32:44 +00003132 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
Dave Jonesd29f7492008-07-22 14:09:06 -07003133}
3134
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003135/**
3136 * netif_queue_stopped - test if transmit queue is flowblocked
3137 * @dev: network device
3138 *
3139 * Test if transmit queue on device is currently unable to send.
3140 */
David S. Miller4d295152012-03-07 21:02:35 -05003141static inline bool netif_queue_stopped(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142{
David S. Millere8a04642008-07-17 00:34:19 -07003143 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144}
3145
David S. Miller4d295152012-03-07 21:02:35 -05003146static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
David S. Millerc3f26a22008-07-31 16:58:50 -07003147{
Tom Herbert734664982011-11-28 16:32:44 +00003148 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3149}
3150
Daniel Borkmann8e2f1a62014-04-02 20:52:57 +02003151static inline bool
3152netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
Tom Herbert734664982011-11-28 16:32:44 +00003153{
3154 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3155}
3156
Daniel Borkmann8e2f1a62014-04-02 20:52:57 +02003157static inline bool
3158netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3159{
3160 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3161}
3162
Eric Dumazet53511452014-10-08 08:19:27 -07003163/**
3164 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3165 * @dev_queue: pointer to transmit queue
3166 *
3167 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05003168 * to give appropriate hint to the CPU.
Eric Dumazet53511452014-10-08 08:19:27 -07003169 */
3170static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3171{
3172#ifdef CONFIG_BQL
3173 prefetchw(&dev_queue->dql.num_queued);
3174#endif
3175}
3176
3177/**
3178 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3179 * @dev_queue: pointer to transmit queue
3180 *
3181 * BQL enabled drivers might use this helper in their TX completion path,
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05003182 * to give appropriate hint to the CPU.
Eric Dumazet53511452014-10-08 08:19:27 -07003183 */
3184static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3185{
3186#ifdef CONFIG_BQL
3187 prefetchw(&dev_queue->dql.limit);
3188#endif
3189}
3190
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003191static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3192 unsigned int bytes)
3193{
Tom Herbert114cf582011-11-28 16:33:09 +00003194#ifdef CONFIG_BQL
3195 dql_queued(&dev_queue->dql, bytes);
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00003196
3197 if (likely(dql_avail(&dev_queue->dql) >= 0))
3198 return;
3199
3200 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3201
3202 /*
3203 * The XOFF flag must be set before checking the dql_avail below,
3204 * because in netdev_tx_completed_queue we update the dql_completed
3205 * before checking the XOFF flag.
3206 */
3207 smp_mb();
3208
3209 /* check again in case another CPU has just made room avail */
3210 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3211 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
Tom Herbert114cf582011-11-28 16:33:09 +00003212#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003213}
3214
Eric Dumazet3e590202018-10-31 08:39:12 -07003215/* Variant of netdev_tx_sent_queue() for drivers that are aware
3216 * that they should not test BQL status themselves.
3217 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
3218 * skb of a batch.
3219 * Returns true if the doorbell must be used to kick the NIC.
3220 */
3221static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3222 unsigned int bytes,
3223 bool xmit_more)
3224{
3225 if (xmit_more) {
3226#ifdef CONFIG_BQL
3227 dql_queued(&dev_queue->dql, bytes);
3228#endif
3229 return netif_tx_queue_stopped(dev_queue);
3230 }
3231 netdev_tx_sent_queue(dev_queue, bytes);
3232 return true;
3233}
3234
Florian Fainelli0042d0c2013-09-06 16:58:00 +01003235/**
3236 * netdev_sent_queue - report the number of bytes queued to hardware
3237 * @dev: network device
3238 * @bytes: number of bytes queued to the hardware device queue
3239 *
3240 * Report the number of bytes queued for sending/completion to the network
3241 * device hardware queue. @bytes should be a good approximation and should
3242 * exactly match netdev_completed_queue() @bytes
3243 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003244static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3245{
3246 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3247}
3248
Heiner Kallweit620344c2018-11-25 14:30:29 +01003249static inline bool __netdev_sent_queue(struct net_device *dev,
3250 unsigned int bytes,
3251 bool xmit_more)
3252{
3253 return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3254 xmit_more);
3255}
3256
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003257static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
Eric Dumazet95c96172012-04-15 05:58:06 +00003258 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003259{
Tom Herbert114cf582011-11-28 16:33:09 +00003260#ifdef CONFIG_BQL
Alexander Duyckb37c0fb2012-02-07 02:29:06 +00003261 if (unlikely(!bytes))
3262 return;
3263
3264 dql_completed(&dev_queue->dql, bytes);
3265
3266 /*
3267 * Without the memory barrier there is a small possiblity that
3268 * netdev_tx_sent_queue will miss the update and cause the queue to
3269 * be stopped forever
3270 */
3271 smp_mb();
3272
3273 if (dql_avail(&dev_queue->dql) < 0)
3274 return;
3275
3276 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3277 netif_schedule_queue(dev_queue);
Tom Herbert114cf582011-11-28 16:33:09 +00003278#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003279}
3280
Florian Fainelli0042d0c2013-09-06 16:58:00 +01003281/**
3282 * netdev_completed_queue - report bytes and packets completed by device
3283 * @dev: network device
3284 * @pkts: actual number of packets sent over the medium
3285 * @bytes: actual number of bytes sent over the medium
3286 *
3287 * Report the number of bytes and packets transmitted by the network device
3288 * hardware queue over the physical medium, @bytes must exactly match the
3289 * @bytes amount passed to netdev_sent_queue()
3290 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003291static inline void netdev_completed_queue(struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +00003292 unsigned int pkts, unsigned int bytes)
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003293{
3294 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3295}
3296
3297static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3298{
Tom Herbert114cf582011-11-28 16:33:09 +00003299#ifdef CONFIG_BQL
Alexander Duyck5c490352012-02-07 02:29:01 +00003300 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
Tom Herbert114cf582011-11-28 16:33:09 +00003301 dql_reset(&q->dql);
3302#endif
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003303}
3304
Florian Fainelli0042d0c2013-09-06 16:58:00 +01003305/**
3306 * netdev_reset_queue - reset the packets and bytes count of a network device
3307 * @dev_queue: network device
3308 *
3309 * Reset the bytes and packet count of a network device and clear the
3310 * software flow control OFF bit for this network device
3311 */
Tom Herbertc5d67bd2011-11-28 16:32:52 +00003312static inline void netdev_reset_queue(struct net_device *dev_queue)
3313{
3314 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
David S. Millerc3f26a22008-07-31 16:58:50 -07003315}
3316
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003317/**
Daniel Borkmannb9507bd2014-02-16 15:55:21 +01003318 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3319 * @dev: network device
3320 * @queue_index: given tx queue index
3321 *
3322 * Returns 0 if given tx queue index >= number of device tx queues,
3323 * otherwise returns the originally passed tx queue index.
3324 */
3325static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3326{
3327 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3328 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3329 dev->name, queue_index,
3330 dev->real_num_tx_queues);
3331 return 0;
3332 }
3333
3334 return queue_index;
3335}
3336
3337/**
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003338 * netif_running - test if up
3339 * @dev: network device
3340 *
3341 * Test if the device has been brought up.
3342 */
David S. Miller4d295152012-03-07 21:02:35 -05003343static inline bool netif_running(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344{
3345 return test_bit(__LINK_STATE_START, &dev->state);
3346}
3347
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003348/*
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05003349 * Routines to manage the subqueues on a device. We only need start,
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003350 * stop, and a check if it's stopped. All other device management is
3351 * done at the overall netdevice level.
3352 * Also test the device if we're multiqueue.
3353 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003354
3355/**
3356 * netif_start_subqueue - allow sending packets on subqueue
3357 * @dev: network device
3358 * @queue_index: sub queue index
3359 *
3360 * Start individual transmit queue of a device with multiple transmit queues.
3361 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003362static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3363{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003364 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00003365
3366 netif_tx_start_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003367}
3368
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003369/**
3370 * netif_stop_subqueue - stop sending packets on subqueue
3371 * @dev: network device
3372 * @queue_index: sub queue index
3373 *
3374 * Stop individual transmit queue of a device with multiple transmit queues.
3375 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003376static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3377{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003378 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00003379 netif_tx_stop_queue(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003380}
3381
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003382/**
3383 * netif_subqueue_stopped - test status of subqueue
3384 * @dev: network device
3385 * @queue_index: sub queue index
3386 *
3387 * Check individual transmit queue of a device with multiple transmit queues.
3388 */
David S. Miller4d295152012-03-07 21:02:35 -05003389static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3390 u16 queue_index)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003391{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003392 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00003393
3394 return netif_tx_queue_stopped(txq);
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003395}
3396
David S. Miller4d295152012-03-07 21:02:35 -05003397static inline bool netif_subqueue_stopped(const struct net_device *dev,
3398 struct sk_buff *skb)
Pavel Emelyanov668f8952007-10-21 17:01:56 -07003399{
3400 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3401}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003402
Florian Fainelli738b35c2017-01-11 21:13:02 -08003403/**
3404 * netif_wake_subqueue - allow sending packets on subqueue
3405 * @dev: network device
3406 * @queue_index: sub queue index
3407 *
3408 * Resume individual transmit queue of a device with multiple transmit queues.
3409 */
3410static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3411{
3412 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3413
3414 netif_tx_wake_queue(txq);
3415}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003416
Alexander Duyck537c00d2013-01-10 08:57:02 +00003417#ifdef CONFIG_XPS
David S. Miller53af53a2013-10-08 23:07:53 -04003418int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
Joe Perchesf629d202013-09-26 14:48:15 -07003419 u16 index);
Amritha Nambiar80d19662018-06-29 21:26:41 -07003420int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3421 u16 index, bool is_rxqs_map);
3422
3423/**
3424 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3425 * @j: CPU/Rx queue index
3426 * @mask: bitmask of all cpus/rx queues
3427 * @nr_bits: number of bits in the bitmask
3428 *
3429 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
3430 */
3431static inline bool netif_attr_test_mask(unsigned long j,
3432 const unsigned long *mask,
3433 unsigned int nr_bits)
3434{
3435 cpu_max_bits_warn(j, nr_bits);
3436 return test_bit(j, mask);
3437}
3438
3439/**
3440 * netif_attr_test_online - Test for online CPU/Rx queue
3441 * @j: CPU/Rx queue index
3442 * @online_mask: bitmask for CPUs/Rx queues that are online
3443 * @nr_bits: number of bits in the bitmask
3444 *
3445 * Returns true if a CPU/Rx queue is online.
3446 */
3447static inline bool netif_attr_test_online(unsigned long j,
3448 const unsigned long *online_mask,
3449 unsigned int nr_bits)
3450{
3451 cpu_max_bits_warn(j, nr_bits);
3452
3453 if (online_mask)
3454 return test_bit(j, online_mask);
3455
3456 return (j < nr_bits);
3457}
3458
3459/**
3460 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
3461 * @n: CPU/Rx queue index
3462 * @srcp: the cpumask/Rx queue mask pointer
3463 * @nr_bits: number of bits in the bitmask
3464 *
3465 * Returns >= nr_bits if no further CPUs/Rx queues set.
3466 */
3467static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
3468 unsigned int nr_bits)
3469{
3470 /* -1 is a legal arg here. */
3471 if (n != -1)
3472 cpu_max_bits_warn(n, nr_bits);
3473
3474 if (srcp)
3475 return find_next_bit(srcp, nr_bits, n + 1);
3476
3477 return n + 1;
3478}
3479
3480/**
3481 * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
3482 * @n: CPU/Rx queue index
3483 * @src1p: the first CPUs/Rx queues mask pointer
3484 * @src2p: the second CPUs/Rx queues mask pointer
3485 * @nr_bits: number of bits in the bitmask
3486 *
3487 * Returns >= nr_bits if no further CPUs/Rx queues set in both.
3488 */
3489static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
3490 const unsigned long *src2p,
3491 unsigned int nr_bits)
3492{
3493 /* -1 is a legal arg here. */
3494 if (n != -1)
3495 cpu_max_bits_warn(n, nr_bits);
3496
3497 if (src1p && src2p)
3498 return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
3499 else if (src1p)
3500 return find_next_bit(src1p, nr_bits, n + 1);
3501 else if (src2p)
3502 return find_next_bit(src2p, nr_bits, n + 1);
3503
3504 return n + 1;
3505}
Alexander Duyck537c00d2013-01-10 08:57:02 +00003506#else
3507static inline int netif_set_xps_queue(struct net_device *dev,
Michael S. Tsirkin35735402013-10-02 09:14:06 +03003508 const struct cpumask *mask,
Alexander Duyck537c00d2013-01-10 08:57:02 +00003509 u16 index)
3510{
3511 return 0;
3512}
Krzysztof Kozlowskic9fbb2d2018-08-10 10:47:43 +02003513
3514static inline int __netif_set_xps_queue(struct net_device *dev,
3515 const unsigned long *mask,
3516 u16 index, bool is_rxqs_map)
3517{
3518 return 0;
3519}
Alexander Duyck537c00d2013-01-10 08:57:02 +00003520#endif
3521
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003522/**
3523 * netif_is_multiqueue - test if device has multiple transmit queues
3524 * @dev: network device
3525 *
3526 * Check if device has multiple transmit queues
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003527 */
David S. Miller4d295152012-03-07 21:02:35 -05003528static inline bool netif_is_multiqueue(const struct net_device *dev)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003529{
Eric Dumazeta02cec22010-09-22 20:43:57 +00003530 return dev->num_tx_queues > 1;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003531}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532
Joe Perchesf629d202013-09-26 14:48:15 -07003533int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
John Fastabendf0796d52010-07-01 13:21:57 +00003534
Michael Daltona953be52014-01-16 22:23:28 -08003535#ifdef CONFIG_SYSFS
Joe Perchesf629d202013-09-26 14:48:15 -07003536int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003537#else
3538static inline int netif_set_real_num_rx_queues(struct net_device *dev,
Jakub Kicinskic29c2eb2018-07-30 20:43:51 -07003539 unsigned int rxqs)
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003540{
Jakub Kicinskic29c2eb2018-07-30 20:43:51 -07003541 dev->real_num_rx_queues = rxqs;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003542 return 0;
3543}
3544#endif
3545
Daniel Borkmann65073a62018-01-31 12:58:56 +01003546static inline struct netdev_rx_queue *
3547__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
3548{
3549 return dev->_rx + rxq;
3550}
3551
Michael Daltona953be52014-01-16 22:23:28 -08003552#ifdef CONFIG_SYSFS
3553static inline unsigned int get_netdev_rx_queue_index(
3554 struct netdev_rx_queue *queue)
3555{
3556 struct net_device *dev = queue->dev;
3557 int index = queue - dev->_rx;
3558
3559 BUG_ON(index >= dev->num_rx_queues);
3560 return index;
3561}
3562#endif
3563
Yuval Mintz16917b82012-07-01 03:18:50 +00003564#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
Joe Perchesf629d202013-09-26 14:48:15 -07003565int netif_get_num_default_rss_queues(void);
Yuval Mintz16917b82012-07-01 03:18:50 +00003566
Eric Dumazete6247022013-12-05 04:45:08 -08003567enum skb_free_reason {
3568 SKB_REASON_CONSUMED,
3569 SKB_REASON_DROPPED,
3570};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003571
Eric Dumazete6247022013-12-05 04:45:08 -08003572void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3573void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3574
3575/*
3576 * It is not allowed to call kfree_skb() or consume_skb() from hardware
3577 * interrupt context or with hardware interrupts being disabled.
3578 * (in_irq() || irqs_disabled())
3579 *
3580 * We provide four helpers that can be used in following contexts :
3581 *
3582 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3583 * replacing kfree_skb(skb)
3584 *
3585 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3586 * Typically used in place of consume_skb(skb) in TX completion path
3587 *
3588 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3589 * replacing kfree_skb(skb)
3590 *
3591 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
3592 * and consumed a packet. Used in place of consume_skb(skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593 */
Eric Dumazete6247022013-12-05 04:45:08 -08003594static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3595{
3596 __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3597}
3598
3599static inline void dev_consume_skb_irq(struct sk_buff *skb)
3600{
3601 __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3602}
3603
3604static inline void dev_kfree_skb_any(struct sk_buff *skb)
3605{
3606 __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3607}
3608
3609static inline void dev_consume_skb_any(struct sk_buff *skb)
3610{
3611 __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3612}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613
Jason Wang7c497472017-08-11 19:41:17 +08003614void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3615int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
Joe Perchesf629d202013-09-26 14:48:15 -07003616int netif_rx(struct sk_buff *skb);
3617int netif_rx_ni(struct sk_buff *skb);
Eric W. Biederman04eb4482015-09-15 20:04:15 -05003618int netif_receive_skb(struct sk_buff *skb);
Jesper Dangaard Brouer1c601d82017-10-16 12:19:39 +02003619int netif_receive_skb_core(struct sk_buff *skb);
Edward Creef6ad8c12018-07-02 16:12:45 +01003620void netif_receive_skb_list(struct list_head *head);
Joe Perchesf629d202013-09-26 14:48:15 -07003621gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3622void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3623struct sk_buff *napi_get_frags(struct napi_struct *napi);
3624gro_result_t napi_gro_frags(struct napi_struct *napi);
Jerry Chubf5a7552014-01-07 10:23:19 -08003625struct packet_offload *gro_find_receive_by_type(__be16 type);
3626struct packet_offload *gro_find_complete_by_type(__be16 type);
Herbert Xu76620aa2009-04-16 02:02:07 -07003627
3628static inline void napi_free_frags(struct napi_struct *napi)
3629{
3630 kfree_skb(napi->skb);
3631 napi->skb = NULL;
3632}
3633
Mahesh Bandewar24b27fc2016-09-01 22:18:34 -07003634bool netdev_is_rx_handler_busy(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07003635int netdev_rx_handler_register(struct net_device *dev,
3636 rx_handler_func_t *rx_handler,
3637 void *rx_handler_data);
3638void netdev_rx_handler_unregister(struct net_device *dev);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003639
Joe Perchesf629d202013-09-26 14:48:15 -07003640bool dev_valid_name(const char *name);
Al Viro44c02a22017-10-05 12:59:44 -04003641int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
3642 bool *need_copyout);
Al Viro36fd6332017-06-26 13:19:16 -04003643int dev_ifconf(struct net *net, struct ifconf *, int);
Joe Perchesf629d202013-09-26 14:48:15 -07003644int dev_ethtool(struct net *net, struct ifreq *);
3645unsigned int dev_get_flags(const struct net_device *);
Petr Machata6d040322018-12-06 17:05:43 +00003646int __dev_change_flags(struct net_device *dev, unsigned int flags,
3647 struct netlink_ext_ack *extack);
Petr Machata567c5e12018-12-06 17:05:42 +00003648int dev_change_flags(struct net_device *dev, unsigned int flags,
3649 struct netlink_ext_ack *extack);
David S. Millercb178192013-09-30 15:36:45 -04003650void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3651 unsigned int gchanges);
Joe Perchesf629d202013-09-26 14:48:15 -07003652int dev_change_name(struct net_device *, const char *);
3653int dev_set_alias(struct net_device *, const char *, size_t);
Florian Westphal6c557002017-10-02 23:50:05 +02003654int dev_get_alias(const struct net_device *, char *, size_t);
Joe Perchesf629d202013-09-26 14:48:15 -07003655int dev_change_net_namespace(struct net_device *, struct net *, const char *);
WANG Congf51048c2017-07-06 15:01:57 -07003656int __dev_set_mtu(struct net_device *, int);
Stephen Hemminger7a4c53b2018-07-27 13:43:23 -07003657int dev_set_mtu_ext(struct net_device *dev, int mtu,
3658 struct netlink_ext_ack *extack);
Joe Perchesf629d202013-09-26 14:48:15 -07003659int dev_set_mtu(struct net_device *, int);
Cong Wang6a643dd2018-01-25 18:26:22 -08003660int dev_change_tx_queue_len(struct net_device *, unsigned long);
Joe Perchesf629d202013-09-26 14:48:15 -07003661void dev_set_group(struct net_device *, int);
Petr Machatad59cdf92018-12-13 11:54:35 +00003662int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
3663 struct netlink_ext_ack *extack);
Petr Machata3a37a962018-12-13 11:54:30 +00003664int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
3665 struct netlink_ext_ack *extack);
Joe Perchesf629d202013-09-26 14:48:15 -07003666int dev_change_carrier(struct net_device *, bool new_carrier);
3667int dev_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01003668 struct netdev_phys_item_id *ppid);
David Aherndb24a902015-03-17 20:23:15 -06003669int dev_get_phys_port_name(struct net_device *dev,
3670 char *name, size_t len);
Florian Fainellid6abc5962019-02-06 09:45:35 -08003671int dev_get_port_parent_id(struct net_device *dev,
3672 struct netdev_phys_item_id *ppid, bool recurse);
3673bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
Anuradha Karuppiahd746d702015-07-14 13:43:19 -07003674int dev_change_proto_down(struct net_device *dev, bool proto_down);
Andy Roulinb5899672019-02-22 18:06:36 +00003675int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
Steffen Klassertf53c7232017-12-20 10:41:36 +01003676struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
David S. Millerce937182014-08-30 19:22:20 -07003677struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3678 struct netdev_queue *txq, int *ret);
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02003679
Jakub Kicinskif4e63522017-11-03 13:56:16 -07003680typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02003681int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3682 int fd, u32 flags);
Jakub Kicinskia25717d2018-07-11 20:36:41 -07003683u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
3684 enum bpf_netdev_command cmd);
Jakub Kicinski84c6b862018-07-30 20:43:53 -07003685int xdp_umem_query(struct net_device *dev, u16 queue_id);
Daniel Borkmannd67b9cd2017-05-12 01:04:46 +02003686
Herbert Xua0265d22014-04-17 13:45:03 +08003687int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
Joe Perchesf629d202013-09-26 14:48:15 -07003688int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
Nikolay Aleksandrovf4b05d22016-04-28 17:59:28 +02003689bool is_skb_forwardable(const struct net_device *dev,
3690 const struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691
Martin KaFai Lau4e3264d2016-11-09 15:36:33 -08003692static __always_inline int ____dev_forward_skb(struct net_device *dev,
3693 struct sk_buff *skb)
3694{
3695 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3696 unlikely(!is_skb_forwardable(dev, skb))) {
3697 atomic_long_inc(&dev->rx_dropped);
3698 kfree_skb(skb);
3699 return NET_RX_DROP;
3700 }
3701
3702 skb_scrub_packet(skb, true);
3703 skb->priority = 0;
3704 return 0;
3705}
3706
Maciej W. Rozycki9f9a7422018-10-09 23:57:49 +01003707bool dev_nit_active(struct net_device *dev);
David Ahern74b20582016-05-10 11:19:50 -07003708void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3709
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03003710extern int netdev_budget;
Matthew Whitehead7acf8a12017-04-19 12:37:10 -04003711extern unsigned int netdev_budget_usecs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712
3713/* Called by rtnetlink.c:rtnl_unlock() */
Joe Perchesf629d202013-09-26 14:48:15 -07003714void netdev_run_todo(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003716/**
3717 * dev_put - release reference to device
3718 * @dev: network device
3719 *
Benjamin Thery9ef44292007-10-10 21:18:17 -07003720 * Release reference to device to allow it to be freed.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003721 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722static inline void dev_put(struct net_device *dev)
3723{
Christoph Lameter933393f2011-12-22 11:58:51 -06003724 this_cpu_dec(*dev->pcpu_refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725}
3726
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003727/**
3728 * dev_hold - get reference to device
3729 * @dev: network device
3730 *
Benjamin Thery9ef44292007-10-10 21:18:17 -07003731 * Hold reference to device to keep it from being freed.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003732 */
Stephen Hemminger15333062006-03-20 22:32:28 -08003733static inline void dev_hold(struct net_device *dev)
3734{
Christoph Lameter933393f2011-12-22 11:58:51 -06003735 this_cpu_inc(*dev->pcpu_refcnt);
Stephen Hemminger15333062006-03-20 22:32:28 -08003736}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737
3738/* Carrier loss detection, dial on demand. The functions netif_carrier_on
3739 * and _off may be called from IRQ context, but it is caller
3740 * who is responsible for serialization of these calls.
Stefan Rompfb00055a2006-03-20 17:09:11 -08003741 *
3742 * The name carrier is inappropriate, these functions should really be
3743 * called netif_lowerlayer_*() because they represent the state of any
3744 * kind of lower layer not just hardware media.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745 */
3746
Joe Perchesf629d202013-09-26 14:48:15 -07003747void linkwatch_init_dev(struct net_device *dev);
3748void linkwatch_fire_event(struct net_device *dev);
3749void linkwatch_forget_dev(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003751/**
3752 * netif_carrier_ok - test if carrier present
3753 * @dev: network device
3754 *
3755 * Check if carrier is present on device
3756 */
David S. Miller4d295152012-03-07 21:02:35 -05003757static inline bool netif_carrier_ok(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758{
3759 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
3760}
3761
Joe Perchesf629d202013-09-26 14:48:15 -07003762unsigned long dev_trans_start(struct net_device *dev);
Eric Dumazet9d214932009-05-17 20:55:16 -07003763
Joe Perchesf629d202013-09-26 14:48:15 -07003764void __netdev_watchdog_up(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765
Joe Perchesf629d202013-09-26 14:48:15 -07003766void netif_carrier_on(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003767
Joe Perchesf629d202013-09-26 14:48:15 -07003768void netif_carrier_off(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003770/**
3771 * netif_dormant_on - mark device as dormant.
3772 * @dev: network device
3773 *
3774 * Mark device as dormant (as per RFC2863).
3775 *
3776 * The dormant state indicates that the relevant interface is not
3777 * actually in a condition to pass packets (i.e., it is not 'up') but is
3778 * in a "pending" state, waiting for some external event. For "on-
3779 * demand" interfaces, this new state identifies the situation where the
3780 * interface is waiting for events to place it in the up state.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003781 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08003782static inline void netif_dormant_on(struct net_device *dev)
3783{
3784 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
3785 linkwatch_fire_event(dev);
3786}
3787
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003788/**
3789 * netif_dormant_off - set device as not dormant.
3790 * @dev: network device
3791 *
3792 * Device is not in dormant state.
3793 */
Stefan Rompfb00055a2006-03-20 17:09:11 -08003794static inline void netif_dormant_off(struct net_device *dev)
3795{
3796 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
3797 linkwatch_fire_event(dev);
3798}
3799
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003800/**
Zhang Shengju8ecbc402017-04-26 11:05:12 +08003801 * netif_dormant - test if device is dormant
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003802 * @dev: network device
3803 *
Zhang Shengju8ecbc402017-04-26 11:05:12 +08003804 * Check if device is dormant.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003805 */
David S. Miller4d295152012-03-07 21:02:35 -05003806static inline bool netif_dormant(const struct net_device *dev)
Stefan Rompfb00055a2006-03-20 17:09:11 -08003807{
3808 return test_bit(__LINK_STATE_DORMANT, &dev->state);
3809}
3810
3811
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003812/**
3813 * netif_oper_up - test if device is operational
3814 * @dev: network device
3815 *
3816 * Check if carrier is operational
3817 */
David S. Miller4d295152012-03-07 21:02:35 -05003818static inline bool netif_oper_up(const struct net_device *dev)
Eric Dumazetd94d9fe2009-11-04 09:50:58 -08003819{
Stefan Rompfb00055a2006-03-20 17:09:11 -08003820 return (dev->operstate == IF_OPER_UP ||
3821 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
3822}
3823
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003824/**
3825 * netif_device_present - is device available or removed
3826 * @dev: network device
3827 *
3828 * Check if device has not been removed from system.
3829 */
David S. Miller4d295152012-03-07 21:02:35 -05003830static inline bool netif_device_present(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831{
3832 return test_bit(__LINK_STATE_PRESENT, &dev->state);
3833}
3834
Joe Perchesf629d202013-09-26 14:48:15 -07003835void netif_device_detach(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836
Joe Perchesf629d202013-09-26 14:48:15 -07003837void netif_device_attach(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838
3839/*
3840 * Network interface message level settings
3841 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842
3843enum {
3844 NETIF_MSG_DRV = 0x0001,
3845 NETIF_MSG_PROBE = 0x0002,
3846 NETIF_MSG_LINK = 0x0004,
3847 NETIF_MSG_TIMER = 0x0008,
3848 NETIF_MSG_IFDOWN = 0x0010,
3849 NETIF_MSG_IFUP = 0x0020,
3850 NETIF_MSG_RX_ERR = 0x0040,
3851 NETIF_MSG_TX_ERR = 0x0080,
3852 NETIF_MSG_TX_QUEUED = 0x0100,
3853 NETIF_MSG_INTR = 0x0200,
3854 NETIF_MSG_TX_DONE = 0x0400,
3855 NETIF_MSG_RX_STATUS = 0x0800,
3856 NETIF_MSG_PKTDATA = 0x1000,
3857 NETIF_MSG_HW = 0x2000,
3858 NETIF_MSG_WOL = 0x4000,
3859};
3860
3861#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
3862#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
3863#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
3864#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
3865#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
3866#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
3867#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
3868#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
3869#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
3870#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
3871#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
3872#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
3873#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
3874#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
3875#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
3876
3877static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
3878{
3879 /* use default */
3880 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
3881 return default_msg_enable_bits;
3882 if (debug_value == 0) /* no output */
3883 return 0;
3884 /* set low N bits */
Andy Shevchenkof4d7b3e2019-02-27 13:37:26 +03003885 return (1U << debug_value) - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886}
3887
David S. Millerc773e842008-07-08 23:13:53 -07003888static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
Herbert Xu932ff272006-06-09 12:20:56 -07003889{
David S. Millerc773e842008-07-08 23:13:53 -07003890 spin_lock(&txq->_xmit_lock);
3891 txq->xmit_lock_owner = cpu;
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07003892}
3893
Michael S. Tsirkin5a717f42016-11-24 07:04:08 +02003894static inline bool __netif_tx_acquire(struct netdev_queue *txq)
3895{
3896 __acquire(&txq->_xmit_lock);
3897 return true;
3898}
3899
3900static inline void __netif_tx_release(struct netdev_queue *txq)
3901{
3902 __release(&txq->_xmit_lock);
3903}
3904
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003905static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3906{
3907 spin_lock_bh(&txq->_xmit_lock);
3908 txq->xmit_lock_owner = smp_processor_id();
3909}
3910
David S. Miller4d295152012-03-07 21:02:35 -05003911static inline bool __netif_tx_trylock(struct netdev_queue *txq)
David S. Millerc773e842008-07-08 23:13:53 -07003912{
David S. Miller4d295152012-03-07 21:02:35 -05003913 bool ok = spin_trylock(&txq->_xmit_lock);
David S. Millerc773e842008-07-08 23:13:53 -07003914 if (likely(ok))
3915 txq->xmit_lock_owner = smp_processor_id();
3916 return ok;
Herbert Xu932ff272006-06-09 12:20:56 -07003917}
3918
David S. Millerc773e842008-07-08 23:13:53 -07003919static inline void __netif_tx_unlock(struct netdev_queue *txq)
3920{
3921 txq->xmit_lock_owner = -1;
3922 spin_unlock(&txq->_xmit_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07003923}
3924
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003925static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3926{
3927 txq->xmit_lock_owner = -1;
3928 spin_unlock_bh(&txq->_xmit_lock);
3929}
3930
Eric Dumazet08baf562009-05-25 22:58:01 -07003931static inline void txq_trans_update(struct netdev_queue *txq)
3932{
3933 if (txq->xmit_lock_owner != -1)
3934 txq->trans_start = jiffies;
3935}
3936
Florian Westphalba162f82016-05-03 16:31:00 +02003937/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
3938static inline void netif_trans_update(struct net_device *dev)
3939{
Florian Westphal9b366272016-05-03 16:33:14 +02003940 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
3941
3942 if (txq->trans_start != jiffies)
3943 txq->trans_start = jiffies;
Florian Westphalba162f82016-05-03 16:31:00 +02003944}
3945
David S. Millerc3f26a22008-07-31 16:58:50 -07003946/**
3947 * netif_tx_lock - grab network device transmit lock
3948 * @dev: network device
David S. Millerc3f26a22008-07-31 16:58:50 -07003949 *
3950 * Get network device transmit lock
3951 */
3952static inline void netif_tx_lock(struct net_device *dev)
3953{
3954 unsigned int i;
3955 int cpu;
3956
3957 spin_lock(&dev->tx_global_lock);
3958 cpu = smp_processor_id();
3959 for (i = 0; i < dev->num_tx_queues; i++) {
3960 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3961
3962 /* We are the only thread of execution doing a
3963 * freeze, but we have to grab the _xmit_lock in
3964 * order to synchronize with threads which are in
3965 * the ->hard_start_xmit() handler and already
3966 * checked the frozen bit.
3967 */
3968 __netif_tx_lock(txq, cpu);
3969 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
3970 __netif_tx_unlock(txq);
3971 }
3972}
3973
3974static inline void netif_tx_lock_bh(struct net_device *dev)
3975{
3976 local_bh_disable();
3977 netif_tx_lock(dev);
3978}
3979
Herbert Xu932ff272006-06-09 12:20:56 -07003980static inline void netif_tx_unlock(struct net_device *dev)
3981{
David S. Millere8a04642008-07-17 00:34:19 -07003982 unsigned int i;
David S. Millerc773e842008-07-08 23:13:53 -07003983
David S. Millere8a04642008-07-17 00:34:19 -07003984 for (i = 0; i < dev->num_tx_queues; i++) {
3985 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
David S. Millere8a04642008-07-17 00:34:19 -07003986
David S. Millerc3f26a22008-07-31 16:58:50 -07003987 /* No need to grab the _xmit_lock here. If the
3988 * queue is not stopped for another reason, we
3989 * force a schedule.
3990 */
3991 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
Krishna Kumar7b3d3e42009-08-29 20:21:21 +00003992 netif_schedule_queue(txq);
David S. Millerc3f26a22008-07-31 16:58:50 -07003993 }
3994 spin_unlock(&dev->tx_global_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07003995}
3996
3997static inline void netif_tx_unlock_bh(struct net_device *dev)
3998{
David S. Millere8a04642008-07-17 00:34:19 -07003999 netif_tx_unlock(dev);
4000 local_bh_enable();
Herbert Xu932ff272006-06-09 12:20:56 -07004001}
4002
David S. Millerc773e842008-07-08 23:13:53 -07004003#define HARD_TX_LOCK(dev, txq, cpu) { \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07004004 if ((dev->features & NETIF_F_LLTX) == 0) { \
David S. Millerc773e842008-07-08 23:13:53 -07004005 __netif_tx_lock(txq, cpu); \
Michael S. Tsirkin5a717f42016-11-24 07:04:08 +02004006 } else { \
4007 __netif_tx_acquire(txq); \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07004008 } \
4009}
4010
Eric W. Biederman5efeac42014-03-27 15:42:20 -07004011#define HARD_TX_TRYLOCK(dev, txq) \
4012 (((dev->features & NETIF_F_LLTX) == 0) ? \
4013 __netif_tx_trylock(txq) : \
Michael S. Tsirkin5a717f42016-11-24 07:04:08 +02004014 __netif_tx_acquire(txq))
Eric W. Biederman5efeac42014-03-27 15:42:20 -07004015
David S. Millerc773e842008-07-08 23:13:53 -07004016#define HARD_TX_UNLOCK(dev, txq) { \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07004017 if ((dev->features & NETIF_F_LLTX) == 0) { \
David S. Millerc773e842008-07-08 23:13:53 -07004018 __netif_tx_unlock(txq); \
Michael S. Tsirkin5a717f42016-11-24 07:04:08 +02004019 } else { \
4020 __netif_tx_release(txq); \
Jamal Hadi Salim22dd7492007-09-16 14:40:49 -07004021 } \
4022}
4023
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024static inline void netif_tx_disable(struct net_device *dev)
4025{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004026 unsigned int i;
David S. Millerc3f26a22008-07-31 16:58:50 -07004027 int cpu;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004028
David S. Millerc3f26a22008-07-31 16:58:50 -07004029 local_bh_disable();
4030 cpu = smp_processor_id();
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004031 for (i = 0; i < dev->num_tx_queues; i++) {
4032 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
David S. Millerc3f26a22008-07-31 16:58:50 -07004033
4034 __netif_tx_lock(txq, cpu);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004035 netif_tx_stop_queue(txq);
David S. Millerc3f26a22008-07-31 16:58:50 -07004036 __netif_tx_unlock(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004037 }
David S. Millerc3f26a22008-07-31 16:58:50 -07004038 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039}
4040
David S. Millere308a5d2008-07-15 00:13:44 -07004041static inline void netif_addr_lock(struct net_device *dev)
4042{
4043 spin_lock(&dev->addr_list_lock);
4044}
4045
Jiri Pirko2429f7a2012-01-09 06:36:54 +00004046static inline void netif_addr_lock_nested(struct net_device *dev)
4047{
Vlad Yasevich25175ba2014-05-16 17:04:54 -04004048 int subclass = SINGLE_DEPTH_NESTING;
4049
4050 if (dev->netdev_ops->ndo_get_lock_subclass)
4051 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
4052
4053 spin_lock_nested(&dev->addr_list_lock, subclass);
Jiri Pirko2429f7a2012-01-09 06:36:54 +00004054}
4055
David S. Millere308a5d2008-07-15 00:13:44 -07004056static inline void netif_addr_lock_bh(struct net_device *dev)
4057{
4058 spin_lock_bh(&dev->addr_list_lock);
4059}
4060
4061static inline void netif_addr_unlock(struct net_device *dev)
4062{
4063 spin_unlock(&dev->addr_list_lock);
4064}
4065
4066static inline void netif_addr_unlock_bh(struct net_device *dev)
4067{
4068 spin_unlock_bh(&dev->addr_list_lock);
4069}
4070
Jiri Pirkof001fde2009-05-05 02:48:28 +00004071/*
Jiri Pirko31278e72009-06-17 01:12:19 +00004072 * dev_addrs walker. Should be used only for read access. Call with
Jiri Pirkof001fde2009-05-05 02:48:28 +00004073 * rcu_read_lock held.
4074 */
4075#define for_each_dev_addr(dev, ha) \
Jiri Pirko31278e72009-06-17 01:12:19 +00004076 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00004077
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078/* These functions live elsewhere (drivers/net/net_init.c, but related) */
4079
Joe Perchesf629d202013-09-26 14:48:15 -07004080void ether_setup(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081
4082/* Support for loadable net-drivers */
Joe Perchesf629d202013-09-26 14:48:15 -07004083struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02004084 unsigned char name_assign_type,
Joe Perchesf629d202013-09-26 14:48:15 -07004085 void (*setup)(struct net_device *),
4086 unsigned int txqs, unsigned int rxqs);
Cong Wang0ad646c2017-10-13 11:58:53 -07004087int dev_get_valid_name(struct net *net, struct net_device *dev,
4088 const char *name);
4089
Tom Gundersenc835a672014-07-14 16:37:24 +02004090#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4091 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
Tom Herbert36909ea2011-01-09 19:36:31 +00004092
Tom Gundersenc835a672014-07-14 16:37:24 +02004093#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4094 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4095 count)
Tom Herbert36909ea2011-01-09 19:36:31 +00004096
Joe Perchesf629d202013-09-26 14:48:15 -07004097int register_netdev(struct net_device *dev);
4098void unregister_netdev(struct net_device *dev);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004099
Jiri Pirko22bedad32010-04-01 21:22:57 +00004100/* General hardware address lists handling functions */
Joe Perchesf629d202013-09-26 14:48:15 -07004101int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4102 struct netdev_hw_addr_list *from_list, int addr_len);
4103void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4104 struct netdev_hw_addr_list *from_list, int addr_len);
Alexander Duyck670e5b82014-05-28 18:44:46 -07004105int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4106 struct net_device *dev,
4107 int (*sync)(struct net_device *, const unsigned char *),
4108 int (*unsync)(struct net_device *,
4109 const unsigned char *));
Ivan Khoronzhuke7946762018-11-08 22:27:54 +02004110int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4111 struct net_device *dev,
4112 int (*sync)(struct net_device *,
4113 const unsigned char *, int),
4114 int (*unsync)(struct net_device *,
4115 const unsigned char *, int));
4116void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4117 struct net_device *dev,
4118 int (*unsync)(struct net_device *,
4119 const unsigned char *, int));
Alexander Duyck670e5b82014-05-28 18:44:46 -07004120void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4121 struct net_device *dev,
4122 int (*unsync)(struct net_device *,
4123 const unsigned char *));
Joe Perchesf629d202013-09-26 14:48:15 -07004124void __hw_addr_init(struct netdev_hw_addr_list *list);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004125
Jiri Pirkof001fde2009-05-05 02:48:28 +00004126/* Functions used for device addresses handling */
Joe Perchesf629d202013-09-26 14:48:15 -07004127int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4128 unsigned char addr_type);
4129int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4130 unsigned char addr_type);
Joe Perchesf629d202013-09-26 14:48:15 -07004131void dev_addr_flush(struct net_device *dev);
4132int dev_addr_init(struct net_device *dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00004133
4134/* Functions used for unicast addresses handling */
Joe Perchesf629d202013-09-26 14:48:15 -07004135int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4136int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4137int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4138int dev_uc_sync(struct net_device *to, struct net_device *from);
4139int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4140void dev_uc_unsync(struct net_device *to, struct net_device *from);
4141void dev_uc_flush(struct net_device *dev);
4142void dev_uc_init(struct net_device *dev);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004143
Alexander Duyck670e5b82014-05-28 18:44:46 -07004144/**
4145 * __dev_uc_sync - Synchonize device's unicast list
4146 * @dev: device to sync
4147 * @sync: function to call if address should be added
4148 * @unsync: function to call if address should be removed
4149 *
4150 * Add newly added addresses to the interface, and release
4151 * addresses that have been deleted.
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05004152 */
Alexander Duyck670e5b82014-05-28 18:44:46 -07004153static inline int __dev_uc_sync(struct net_device *dev,
4154 int (*sync)(struct net_device *,
4155 const unsigned char *),
4156 int (*unsync)(struct net_device *,
4157 const unsigned char *))
4158{
4159 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
4160}
4161
4162/**
Masanari Iidae793c0f2014-09-04 23:44:36 +09004163 * __dev_uc_unsync - Remove synchronized addresses from device
Alexander Duyck670e5b82014-05-28 18:44:46 -07004164 * @dev: device to sync
4165 * @unsync: function to call if address should be removed
4166 *
4167 * Remove all addresses that were added to the device by dev_uc_sync().
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05004168 */
Alexander Duyck670e5b82014-05-28 18:44:46 -07004169static inline void __dev_uc_unsync(struct net_device *dev,
4170 int (*unsync)(struct net_device *,
4171 const unsigned char *))
4172{
4173 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
4174}
4175
Jiri Pirko22bedad32010-04-01 21:22:57 +00004176/* Functions used for multicast addresses handling */
Joe Perchesf629d202013-09-26 14:48:15 -07004177int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4178int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4179int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4180int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4181int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4182int dev_mc_sync(struct net_device *to, struct net_device *from);
4183int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4184void dev_mc_unsync(struct net_device *to, struct net_device *from);
4185void dev_mc_flush(struct net_device *dev);
4186void dev_mc_init(struct net_device *dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08004187
Alexander Duyck670e5b82014-05-28 18:44:46 -07004188/**
4189 * __dev_mc_sync - Synchonize device's multicast list
4190 * @dev: device to sync
4191 * @sync: function to call if address should be added
4192 * @unsync: function to call if address should be removed
4193 *
4194 * Add newly added addresses to the interface, and release
4195 * addresses that have been deleted.
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05004196 */
Alexander Duyck670e5b82014-05-28 18:44:46 -07004197static inline int __dev_mc_sync(struct net_device *dev,
4198 int (*sync)(struct net_device *,
4199 const unsigned char *),
4200 int (*unsync)(struct net_device *,
4201 const unsigned char *))
4202{
4203 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
4204}
4205
4206/**
Masanari Iidae793c0f2014-09-04 23:44:36 +09004207 * __dev_mc_unsync - Remove synchronized addresses from device
Alexander Duyck670e5b82014-05-28 18:44:46 -07004208 * @dev: device to sync
4209 * @unsync: function to call if address should be removed
4210 *
4211 * Remove all addresses that were added to the device by dev_mc_sync().
Bjorn Helgaas5e82b4b2016-03-23 13:47:23 -05004212 */
Alexander Duyck670e5b82014-05-28 18:44:46 -07004213static inline void __dev_mc_unsync(struct net_device *dev,
4214 int (*unsync)(struct net_device *,
4215 const unsigned char *))
4216{
4217 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
4218}
4219
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220/* Functions used for secondary unicast and multicast support */
Joe Perchesf629d202013-09-26 14:48:15 -07004221void dev_set_rx_mode(struct net_device *dev);
4222void __dev_set_rx_mode(struct net_device *dev);
4223int dev_set_promiscuity(struct net_device *dev, int inc);
4224int dev_set_allmulti(struct net_device *dev, int inc);
4225void netdev_state_change(struct net_device *dev);
4226void netdev_notify_peers(struct net_device *dev);
4227void netdev_features_change(struct net_device *dev);
Herbert Xufb286bb2005-11-10 13:01:24 -08004228/* Load a device via the kmod */
Joe Perchesf629d202013-09-26 14:48:15 -07004229void dev_load(struct net *net, const char *name);
4230struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4231 struct rtnl_link_stats64 *storage);
4232void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4233 const struct net_device_stats *netdev_stats);
Herbert Xufb286bb2005-11-10 13:01:24 -08004234
4235extern int netdev_max_backlog;
Eric Dumazet3b098e22010-05-15 23:57:10 -07004236extern int netdev_tstamp_prequeue;
Herbert Xufb286bb2005-11-10 13:01:24 -08004237extern int weight_p;
Matthias Tafelmeier3d48b532016-12-29 21:37:21 +01004238extern int dev_weight_rx_bias;
4239extern int dev_weight_tx_bias;
4240extern int dev_rx_weight;
4241extern int dev_tx_weight;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004242
Joe Perchesf629d202013-09-26 14:48:15 -07004243bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
Vlad Yasevich44a40852014-05-16 17:20:38 -04004244struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4245 struct list_head **iter);
Joe Perchesf629d202013-09-26 14:48:15 -07004246struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4247 struct list_head **iter);
Veaceslav Falico8b5be852013-08-28 23:25:08 +02004248
4249/* iterate through upper list, must be called under RCU read lock */
Vlad Yasevich44a40852014-05-16 17:20:38 -04004250#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
4251 for (iter = &(dev)->adj_list.upper, \
4252 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4253 updev; \
4254 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4255
David Ahern1a3f0602016-10-17 19:15:44 -07004256int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4257 int (*fn)(struct net_device *upper_dev,
4258 void *data),
4259 void *data);
4260
4261bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4262 struct net_device *upper_dev);
4263
Ido Schimmel25cc72a2017-09-01 10:52:31 +02004264bool netdev_has_any_upper_dev(struct net_device *dev);
4265
Joe Perchesf629d202013-09-26 14:48:15 -07004266void *netdev_lower_get_next_private(struct net_device *dev,
4267 struct list_head **iter);
4268void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4269 struct list_head **iter);
Veaceslav Falico31088a12013-09-25 09:20:12 +02004270
4271#define netdev_for_each_lower_private(dev, priv, iter) \
4272 for (iter = (dev)->adj_list.lower.next, \
4273 priv = netdev_lower_get_next_private(dev, &(iter)); \
4274 priv; \
4275 priv = netdev_lower_get_next_private(dev, &(iter)))
4276
4277#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
4278 for (iter = &(dev)->adj_list.lower, \
4279 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4280 priv; \
4281 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4282
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04004283void *netdev_lower_get_next(struct net_device *dev,
4284 struct list_head **iter);
Jiri Pirko7ce856a2016-07-04 08:23:12 +02004285
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04004286#define netdev_for_each_lower_dev(dev, ldev, iter) \
Nikolay Aleksandrovcfdd28b2016-02-17 18:00:31 +01004287 for (iter = (dev)->adj_list.lower.next, \
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04004288 ldev = netdev_lower_get_next(dev, &(iter)); \
4289 ldev; \
4290 ldev = netdev_lower_get_next(dev, &(iter)))
4291
Jiri Pirko7ce856a2016-07-04 08:23:12 +02004292struct net_device *netdev_all_lower_get_next(struct net_device *dev,
4293 struct list_head **iter);
4294struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
4295 struct list_head **iter);
4296
David Ahern1a3f0602016-10-17 19:15:44 -07004297int netdev_walk_all_lower_dev(struct net_device *dev,
4298 int (*fn)(struct net_device *lower_dev,
4299 void *data),
4300 void *data);
4301int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
4302 int (*fn)(struct net_device *lower_dev,
4303 void *data),
4304 void *data);
4305
Joe Perchesf629d202013-09-26 14:48:15 -07004306void *netdev_adjacent_get_private(struct list_head *adj_list);
dingtianhonge001bfa2013-12-13 10:19:55 +08004307void *netdev_lower_get_first_private_rcu(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07004308struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
4309struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
David Ahern42ab19e2017-10-04 17:48:47 -07004310int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
4311 struct netlink_ext_ack *extack);
Joe Perchesf629d202013-09-26 14:48:15 -07004312int netdev_master_upper_dev_link(struct net_device *dev,
Jiri Pirko6dffb042015-12-03 12:12:10 +01004313 struct net_device *upper_dev,
David Ahern42ab19e2017-10-04 17:48:47 -07004314 void *upper_priv, void *upper_info,
4315 struct netlink_ext_ack *extack);
Joe Perchesf629d202013-09-26 14:48:15 -07004316void netdev_upper_dev_unlink(struct net_device *dev,
4317 struct net_device *upper_dev);
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01004318void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
Joe Perchesf629d202013-09-26 14:48:15 -07004319void *netdev_lower_dev_get_private(struct net_device *dev,
4320 struct net_device *lower_dev);
Jiri Pirko04d48262015-12-03 12:12:15 +01004321void netdev_lower_state_changed(struct net_device *lower_dev,
4322 void *lower_state_info);
Eric Dumazet960fb622014-11-16 06:23:05 -08004323
4324/* RSS keys are 40 or 52 bytes long */
4325#define NETDEV_RSS_KEY_LEN 52
Kim Jonesba905f52016-02-02 03:51:16 +00004326extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
Eric Dumazet960fb622014-11-16 06:23:05 -08004327void netdev_rss_key_fill(void *buffer, size_t len);
4328
Sabrina Dubroca952fcfd2016-08-12 16:10:33 +02004329int dev_get_nest_level(struct net_device *dev);
Joe Perchesf629d202013-09-26 14:48:15 -07004330int skb_checksum_help(struct sk_buff *skb);
Davide Carattib72b5bf2017-05-18 15:44:38 +02004331int skb_crc32c_csum_help(struct sk_buff *skb);
Davide Caratti43c26a12017-05-18 15:44:41 +02004332int skb_csum_hwoffload_help(struct sk_buff *skb,
4333 const netdev_features_t features);
4334
Joe Perchesf629d202013-09-26 14:48:15 -07004335struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
4336 netdev_features_t features, bool tx_path);
4337struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
4338 netdev_features_t features);
Cong Wang12b00042013-02-05 16:36:38 +00004339
Moni Shoua61bd3852015-02-03 16:48:29 +02004340struct netdev_bonding_info {
4341 ifslave slave;
4342 ifbond master;
4343};
4344
4345struct netdev_notifier_bonding_info {
4346 struct netdev_notifier_info info; /* must be first */
4347 struct netdev_bonding_info bonding_info;
4348};
4349
4350void netdev_bonding_info_change(struct net_device *dev,
4351 struct netdev_bonding_info *bonding_info);
4352
Cong Wang12b00042013-02-05 16:36:38 +00004353static inline
4354struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
4355{
4356 return __skb_gso_segment(skb, features, true);
4357}
Vlad Yasevich53d64712014-03-27 17:26:18 -04004358__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
Pravin B Shelarec5f0612013-03-07 09:28:01 +00004359
4360static inline bool can_checksum_protocol(netdev_features_t features,
4361 __be16 protocol)
4362{
Tom Herbertc8cd0982015-12-14 11:19:44 -08004363 if (protocol == htons(ETH_P_FCOE))
4364 return !!(features & NETIF_F_FCOE_CRC);
4365
4366 /* Assume this is an IP checksum (not SCTP CRC) */
4367
4368 if (features & NETIF_F_HW_CSUM) {
4369 /* Can checksum everything */
4370 return true;
4371 }
4372
4373 switch (protocol) {
4374 case htons(ETH_P_IP):
4375 return !!(features & NETIF_F_IP_CSUM);
4376 case htons(ETH_P_IPV6):
4377 return !!(features & NETIF_F_IPV6_CSUM);
4378 default:
4379 return false;
4380 }
Pravin B Shelarec5f0612013-03-07 09:28:01 +00004381}
Cong Wang12b00042013-02-05 16:36:38 +00004382
Herbert Xufb286bb2005-11-10 13:01:24 -08004383#ifdef CONFIG_BUG
Cong Wang7fe50ac2018-11-12 14:47:18 -08004384void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
Herbert Xufb286bb2005-11-10 13:01:24 -08004385#else
Cong Wang7fe50ac2018-11-12 14:47:18 -08004386static inline void netdev_rx_csum_fault(struct net_device *dev,
4387 struct sk_buff *skb)
Herbert Xufb286bb2005-11-10 13:01:24 -08004388{
4389}
4390#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391/* rx skb timestamps */
Joe Perchesf629d202013-09-26 14:48:15 -07004392void net_enable_timestamp(void);
4393void net_disable_timestamp(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03004395#ifdef CONFIG_PROC_FS
Joe Perchesf629d202013-09-26 14:48:15 -07004396int __init dev_proc_init(void);
Cong Wang900ff8c2013-02-18 19:20:33 +00004397#else
4398#define dev_proc_init() 0
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03004399#endif
4400
David S. Miller47982482014-08-22 16:21:53 -07004401static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
David S. Millerfa2dbdc2014-08-29 21:55:22 -07004402 struct sk_buff *skb, struct net_device *dev,
4403 bool more)
David S. Miller47982482014-08-22 16:21:53 -07004404{
Florian Westphal6b16f9e2019-04-01 16:42:14 +02004405 __this_cpu_write(softnet_data.xmit.more, more);
David S. Miller0b725a22014-08-25 15:51:53 -07004406 return ops->ndo_start_xmit(skb, dev);
David S. Miller47982482014-08-22 16:21:53 -07004407}
4408
Florian Westphal97cdcf32019-04-01 16:42:13 +02004409static inline bool netdev_xmit_more(void)
4410{
4411 return __this_cpu_read(softnet_data.xmit.more);
4412}
4413
David S. Miller10b3ad82014-08-29 21:07:24 -07004414static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
David S. Millerfa2dbdc2014-08-29 21:55:22 -07004415 struct netdev_queue *txq, bool more)
David S. Miller47982482014-08-22 16:21:53 -07004416{
4417 const struct net_device_ops *ops = dev->netdev_ops;
Alexey Dobriyan21834352018-11-24 12:01:41 +03004418 netdev_tx_t rc;
David S. Miller47982482014-08-22 16:21:53 -07004419
David S. Millerfa2dbdc2014-08-29 21:55:22 -07004420 rc = __netdev_start_xmit(ops, skb, dev, more);
David S. Miller10b3ad82014-08-29 21:07:24 -07004421 if (rc == NETDEV_TX_OK)
4422 txq_trans_update(txq);
4423
4424 return rc;
David S. Miller47982482014-08-22 16:21:53 -07004425}
4426
stephen hemmingerb793dc52017-08-18 13:46:20 -07004427int netdev_class_create_file_ns(const struct class_attribute *class_attr,
Linus Torvalds42a2d922013-11-13 17:40:34 +09004428 const void *ns);
stephen hemmingerb793dc52017-08-18 13:46:20 -07004429void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
Linus Torvalds42a2d922013-11-13 17:40:34 +09004430 const void *ns);
Tejun Heo58292cbe2013-09-11 22:29:04 -04004431
stephen hemmingerb793dc52017-08-18 13:46:20 -07004432static inline int netdev_class_create_file(const struct class_attribute *class_attr)
Tejun Heo58292cbe2013-09-11 22:29:04 -04004433{
4434 return netdev_class_create_file_ns(class_attr, NULL);
4435}
4436
stephen hemmingerb793dc52017-08-18 13:46:20 -07004437static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
Tejun Heo58292cbe2013-09-11 22:29:04 -04004438{
4439 netdev_class_remove_file_ns(class_attr, NULL);
4440}
Jay Vosburghb8a97872008-06-13 18:12:04 -07004441
stephen hemminger737aec52017-08-18 13:46:22 -07004442extern const struct kobj_ns_type_operations net_ns_type_operations;
Johannes Berg04600792010-08-05 17:45:15 +02004443
Joe Perchesf629d202013-09-26 14:48:15 -07004444const char *netdev_drivername(const struct net_device *dev);
Arjan van de Ven6579e572008-07-21 13:31:48 -07004445
Joe Perchesf629d202013-09-26 14:48:15 -07004446void linkwatch_run_queue(void);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03004447
Michal Kubečekda081432014-05-20 08:29:25 +02004448static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4449 netdev_features_t f2)
4450{
Tom Herbertc8cd0982015-12-14 11:19:44 -08004451 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4452 if (f1 & NETIF_F_HW_CSUM)
Tom Herbertb6a0e722016-01-11 10:19:10 -08004453 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
Tom Herbertc8cd0982015-12-14 11:19:44 -08004454 else
Tom Herbertb6a0e722016-01-11 10:19:10 -08004455 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
Tom Herbertc8cd0982015-12-14 11:19:44 -08004456 }
Michal Kubečekda081432014-05-20 08:29:25 +02004457
Tom Herbertc8cd0982015-12-14 11:19:44 -08004458 return f1 & f2;
Michal Kubečekda081432014-05-20 08:29:25 +02004459}
4460
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004461static inline netdev_features_t netdev_get_wanted_features(
4462 struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00004463{
4464 return (dev->features & ~dev->hw_features) | dev->wanted_features;
4465}
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004466netdev_features_t netdev_increment_features(netdev_features_t all,
4467 netdev_features_t one, netdev_features_t mask);
Eric Dumazetb0ce3502013-05-16 07:34:53 +00004468
4469/* Allow TSO being used on stacked device :
4470 * Performing the GSO segmentation before last device
4471 * is a performance improvement.
4472 */
4473static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4474 netdev_features_t mask)
4475{
4476 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4477}
4478
Michał Mirosław6cb6a272011-04-02 22:48:47 -07004479int __netdev_update_features(struct net_device *dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00004480void netdev_update_features(struct net_device *dev);
Michał Mirosławafe12cc2011-05-07 03:22:17 +00004481void netdev_change_features(struct net_device *dev);
Herbert Xu7f353bf2007-08-10 15:47:58 -07004482
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08004483void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4484 struct net_device *dev);
4485
Toshiaki Makitae38f3022015-03-27 14:31:13 +09004486netdev_features_t passthru_features_check(struct sk_buff *skb,
4487 struct net_device *dev,
4488 netdev_features_t features);
Florian Westphalc1e756b2014-05-05 15:00:44 +02004489netdev_features_t netif_skb_features(struct sk_buff *skb);
Jesse Gross58e998c2010-10-29 12:14:55 +00004490
David S. Miller4d295152012-03-07 21:02:35 -05004491static inline bool net_gso_ok(netdev_features_t features, int gso_type)
Herbert Xubcd76112006-06-30 13:36:35 -07004492{
Marcelo Ricardo Leitner7b748342016-04-25 15:13:17 -03004493 netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
Michał Mirosław0345e182011-11-16 14:05:33 +00004494
4495 /* check flags correspondence */
4496 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
Michał Mirosław0345e182011-11-16 14:05:33 +00004497 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4498 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
Alexander Duyckcbc53e02016-04-10 21:44:51 -04004499 BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
Michał Mirosław0345e182011-11-16 14:05:33 +00004500 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4501 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
Tom Herbert4b282522014-06-14 23:23:52 -07004502 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4503 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
Tom Herbert7e133182016-05-18 09:06:10 -07004504 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4505 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
Tom Herbert4b282522014-06-14 23:23:52 -07004506 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4507 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
Alexander Duyck802ab552016-04-10 21:45:03 -04004508 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
Tom Herberte585f232014-11-04 09:06:54 -08004509 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
Marcelo Ricardo Leitner90017ac2016-06-02 15:05:43 -03004510 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
Steffen Klassertc7ef8f02017-04-14 10:05:36 +02004511 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
Willem de Bruijn0c19f8462017-11-21 10:22:25 -05004512 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
Willem de Bruijn83aa0252018-04-26 13:42:21 -04004513 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
Michał Mirosław0345e182011-11-16 14:05:33 +00004514
Herbert Xubcd76112006-06-30 13:36:35 -07004515 return (features & feature) == feature;
4516}
4517
David S. Miller4d295152012-03-07 21:02:35 -05004518static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
Herbert Xu576a30e2006-06-27 13:22:38 -07004519{
Herbert Xu278b2512009-06-03 21:20:51 -07004520 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
David S. Miller21dc3302010-08-23 00:13:46 -07004521 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
Herbert Xu576a30e2006-06-27 13:22:38 -07004522}
4523
Johannes Berg8b86a612015-04-17 15:45:04 +02004524static inline bool netif_needs_gso(struct sk_buff *skb,
David S. Miller4d295152012-03-07 21:02:35 -05004525 netdev_features_t features)
Herbert Xu79671682006-06-22 02:40:14 -07004526{
Jesse Grossfc741212011-01-09 06:23:32 +00004527 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
Yi Zoucdbee742012-03-16 23:08:11 +00004528 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4529 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
Herbert Xu79671682006-06-22 02:40:14 -07004530}
4531
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07004532static inline void netif_set_gso_max_size(struct net_device *dev,
4533 unsigned int size)
4534{
4535 dev->gso_max_size = size;
4536}
4537
Wei-Chun Chao7a7ffba2013-12-26 13:10:22 -08004538static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
4539 int pulled_hlen, u16 mac_offset,
4540 int mac_len)
4541{
4542 skb->protocol = protocol;
4543 skb->encapsulation = 1;
4544 skb_push(skb, pulled_hlen);
4545 skb_reset_transport_header(skb);
4546 skb->mac_header = mac_offset;
4547 skb->network_header = skb->mac_header + mac_len;
4548 skb->mac_len = mac_len;
4549}
4550
Sabrina Dubroca3c175782016-03-11 18:07:32 +01004551static inline bool netif_is_macsec(const struct net_device *dev)
4552{
4553 return dev->priv_flags & IFF_MACSEC;
4554}
4555
Jiri Pirkob618aaa2015-12-04 15:01:31 +01004556static inline bool netif_is_macvlan(const struct net_device *dev)
John Fastabenda6cc0cf2013-11-06 09:54:46 -08004557{
4558 return dev->priv_flags & IFF_MACVLAN;
4559}
4560
Jiri Pirkob618aaa2015-12-04 15:01:31 +01004561static inline bool netif_is_macvlan_port(const struct net_device *dev)
Mahesh Bandewar2f33e7d2014-12-06 15:53:04 -08004562{
4563 return dev->priv_flags & IFF_MACVLAN_PORT;
4564}
4565
Jiri Pirkob618aaa2015-12-04 15:01:31 +01004566static inline bool netif_is_bond_master(const struct net_device *dev)
nikolay@redhat.com8a7fbfa2013-03-12 02:49:01 +00004567{
4568 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4569}
4570
Jiri Pirkob618aaa2015-12-04 15:01:31 +01004571static inline bool netif_is_bond_slave(const struct net_device *dev)
Jiri Pirko1765a572011-02-12 06:48:36 +00004572{
4573 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4574}
4575
Ben Greear3bdc0eb2012-02-11 15:39:30 +00004576static inline bool netif_supports_nofcs(struct net_device *dev)
4577{
4578 return dev->priv_flags & IFF_SUPP_NOFCS;
4579}
4580
Daniel Borkmannd5256082019-01-30 12:49:48 +01004581static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
4582{
4583 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
4584}
4585
David Ahern007979e2015-09-29 20:07:10 -07004586static inline bool netif_is_l3_master(const struct net_device *dev)
David Ahern4e3c8992015-08-13 14:59:00 -06004587{
David Ahern007979e2015-09-29 20:07:10 -07004588 return dev->priv_flags & IFF_L3MDEV_MASTER;
David Ahern4e3c8992015-08-13 14:59:00 -06004589}
4590
David Ahernfee6d4c2015-10-05 08:51:24 -07004591static inline bool netif_is_l3_slave(const struct net_device *dev)
4592{
4593 return dev->priv_flags & IFF_L3MDEV_SLAVE;
4594}
4595
Jiri Pirko0894ae32015-08-27 09:31:19 +02004596static inline bool netif_is_bridge_master(const struct net_device *dev)
4597{
4598 return dev->priv_flags & IFF_EBRIDGE;
4599}
4600
Vlad Yasevich28f9ee22015-11-16 15:43:45 -05004601static inline bool netif_is_bridge_port(const struct net_device *dev)
4602{
4603 return dev->priv_flags & IFF_BRIDGE_PORT;
4604}
4605
Jiri Pirko35d4e172015-08-27 09:31:20 +02004606static inline bool netif_is_ovs_master(const struct net_device *dev)
4607{
4608 return dev->priv_flags & IFF_OPENVSWITCH;
4609}
4610
Jiri Pirko5be66142017-04-18 16:55:36 +02004611static inline bool netif_is_ovs_port(const struct net_device *dev)
4612{
4613 return dev->priv_flags & IFF_OVS_DATAPATH;
4614}
4615
Jiri Pirkob618aaa2015-12-04 15:01:31 +01004616static inline bool netif_is_team_master(const struct net_device *dev)
Jiri Pirkoc981e422015-12-03 12:12:06 +01004617{
4618 return dev->priv_flags & IFF_TEAM;
4619}
4620
Jiri Pirkob618aaa2015-12-04 15:01:31 +01004621static inline bool netif_is_team_port(const struct net_device *dev)
Jiri Pirkof7f019e2015-12-03 12:12:07 +01004622{
4623 return dev->priv_flags & IFF_TEAM_PORT;
4624}
4625
Jiri Pirkob618aaa2015-12-04 15:01:31 +01004626static inline bool netif_is_lag_master(const struct net_device *dev)
Jiri Pirko7be61832015-12-03 12:12:08 +01004627{
4628 return netif_is_bond_master(dev) || netif_is_team_master(dev);
4629}
4630
Jiri Pirkob618aaa2015-12-04 15:01:31 +01004631static inline bool netif_is_lag_port(const struct net_device *dev)
Jiri Pirkoe0ba1412015-12-03 12:12:09 +01004632{
4633 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
4634}
4635
Keller, Jacob Ed4ab4282016-02-08 16:05:03 -08004636static inline bool netif_is_rxfh_configured(const struct net_device *dev)
4637{
4638 return dev->priv_flags & IFF_RXFH_CONFIGURED;
4639}
4640
Sridhar Samudrala30c8bd52018-05-24 09:55:13 -07004641static inline bool netif_is_failover(const struct net_device *dev)
4642{
4643 return dev->priv_flags & IFF_FAILOVER;
4644}
4645
4646static inline bool netif_is_failover_slave(const struct net_device *dev)
4647{
4648 return dev->priv_flags & IFF_FAILOVER_SLAVE;
4649}
4650
Eric Dumazet02875872014-10-05 18:38:35 -07004651/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
4652static inline void netif_keep_dst(struct net_device *dev)
4653{
4654 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
4655}
4656
Paolo Abeni18d3df32016-07-14 18:00:10 +02004657/* return true if dev can't cope with mtu frames that need vlan tag insertion */
4658static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
4659{
4660 /* TODO: reserve and use an additional IFF bit, if we get more users */
4661 return dev->priv_flags & IFF_MACSEC;
4662}
4663
Eric W. Biederman505d4f72008-11-07 22:54:20 -08004664extern struct pernet_operations __net_initdata loopback_net_ops;
Patrick McHardyb1b67dd2009-04-20 04:49:28 +00004665
Joe Perches571ba422010-02-09 11:49:47 +00004666/* Logging, debugging and troubleshooting/diagnostic helpers. */
4667
4668/* netdev_printk helpers, similar to dev_printk */
4669
4670static inline const char *netdev_name(const struct net_device *dev)
4671{
Veaceslav Falicoc6f854d2014-07-17 19:46:09 +02004672 if (!dev->name[0] || strchr(dev->name, '%'))
4673 return "(unnamed net_device)";
Joe Perches571ba422010-02-09 11:49:47 +00004674 return dev->name;
4675}
4676
David Ahern8397ed32017-06-07 12:26:23 -06004677static inline bool netdev_unregistering(const struct net_device *dev)
4678{
4679 return dev->reg_state == NETREG_UNREGISTERING;
4680}
4681
Veaceslav Falicoccc7f492014-07-17 19:46:10 +02004682static inline const char *netdev_reg_state(const struct net_device *dev)
4683{
4684 switch (dev->reg_state) {
4685 case NETREG_UNINITIALIZED: return " (uninitialized)";
4686 case NETREG_REGISTERED: return "";
4687 case NETREG_UNREGISTERING: return " (unregistering)";
4688 case NETREG_UNREGISTERED: return " (unregistered)";
4689 case NETREG_RELEASED: return " (released)";
4690 case NETREG_DUMMY: return " (dummy)";
4691 }
4692
4693 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
4694 return " (unknown)";
4695}
4696
Joe Perchesce3fdb62019-02-02 19:47:25 -08004697__printf(3, 4) __cold
Joe Perches6ea754e2014-09-22 11:10:50 -07004698void netdev_printk(const char *level, const struct net_device *dev,
4699 const char *format, ...);
Joe Perchesce3fdb62019-02-02 19:47:25 -08004700__printf(2, 3) __cold
Joe Perches6ea754e2014-09-22 11:10:50 -07004701void netdev_emerg(const struct net_device *dev, const char *format, ...);
Joe Perchesce3fdb62019-02-02 19:47:25 -08004702__printf(2, 3) __cold
Joe Perches6ea754e2014-09-22 11:10:50 -07004703void netdev_alert(const struct net_device *dev, const char *format, ...);
Joe Perchesce3fdb62019-02-02 19:47:25 -08004704__printf(2, 3) __cold
Joe Perches6ea754e2014-09-22 11:10:50 -07004705void netdev_crit(const struct net_device *dev, const char *format, ...);
Joe Perchesce3fdb62019-02-02 19:47:25 -08004706__printf(2, 3) __cold
Joe Perches6ea754e2014-09-22 11:10:50 -07004707void netdev_err(const struct net_device *dev, const char *format, ...);
Joe Perchesce3fdb62019-02-02 19:47:25 -08004708__printf(2, 3) __cold
Joe Perches6ea754e2014-09-22 11:10:50 -07004709void netdev_warn(const struct net_device *dev, const char *format, ...);
Joe Perchesce3fdb62019-02-02 19:47:25 -08004710__printf(2, 3) __cold
Joe Perches6ea754e2014-09-22 11:10:50 -07004711void netdev_notice(const struct net_device *dev, const char *format, ...);
Joe Perchesce3fdb62019-02-02 19:47:25 -08004712__printf(2, 3) __cold
Joe Perches6ea754e2014-09-22 11:10:50 -07004713void netdev_info(const struct net_device *dev, const char *format, ...);
Joe Perches571ba422010-02-09 11:49:47 +00004714
Gal Pressman375ef2b2017-09-17 13:43:58 +03004715#define netdev_level_once(level, dev, fmt, ...) \
4716do { \
4717 static bool __print_once __read_mostly; \
4718 \
4719 if (!__print_once) { \
4720 __print_once = true; \
4721 netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
4722 } \
4723} while (0)
4724
4725#define netdev_emerg_once(dev, fmt, ...) \
4726 netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
4727#define netdev_alert_once(dev, fmt, ...) \
4728 netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
4729#define netdev_crit_once(dev, fmt, ...) \
4730 netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
4731#define netdev_err_once(dev, fmt, ...) \
4732 netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
4733#define netdev_warn_once(dev, fmt, ...) \
4734 netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
4735#define netdev_notice_once(dev, fmt, ...) \
4736 netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
4737#define netdev_info_once(dev, fmt, ...) \
4738 netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
4739
Vasiliy Kulikov8909c9a2011-03-02 00:33:13 +03004740#define MODULE_ALIAS_NETDEV(device) \
4741 MODULE_ALIAS("netdev-" device)
4742
Jim Cromieb558c962011-12-19 17:11:18 -05004743#if defined(CONFIG_DYNAMIC_DEBUG)
Joe Perches571ba422010-02-09 11:49:47 +00004744#define netdev_dbg(__dev, format, args...) \
4745do { \
Jason Baronffa10cb2011-08-11 14:36:48 -04004746 dynamic_netdev_dbg(__dev, format, ##args); \
Joe Perches571ba422010-02-09 11:49:47 +00004747} while (0)
Jim Cromieb558c962011-12-19 17:11:18 -05004748#elif defined(DEBUG)
4749#define netdev_dbg(__dev, format, args...) \
4750 netdev_printk(KERN_DEBUG, __dev, format, ##args)
Joe Perches571ba422010-02-09 11:49:47 +00004751#else
4752#define netdev_dbg(__dev, format, args...) \
4753({ \
4754 if (0) \
4755 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
Joe Perches571ba422010-02-09 11:49:47 +00004756})
4757#endif
4758
4759#if defined(VERBOSE_DEBUG)
4760#define netdev_vdbg netdev_dbg
4761#else
4762
4763#define netdev_vdbg(dev, format, args...) \
4764({ \
4765 if (0) \
4766 netdev_printk(KERN_DEBUG, dev, format, ##args); \
4767 0; \
4768})
4769#endif
4770
4771/*
4772 * netdev_WARN() acts like dev_printk(), but with the key difference
4773 * of using a WARN/WARN_ON to get the message out, including the
4774 * file/line information and a backtrace.
4775 */
4776#define netdev_WARN(dev, format, args...) \
Gal Pressmane1cfe3d2018-01-07 12:08:36 +02004777 WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
Veaceslav Falicoccc7f492014-07-17 19:46:10 +02004778 netdev_reg_state(dev), ##args)
Joe Perches571ba422010-02-09 11:49:47 +00004779
Gal Pressman72dd8312018-01-07 12:08:35 +02004780#define netdev_WARN_ONCE(dev, format, args...) \
Gal Pressmane1cfe3d2018-01-07 12:08:36 +02004781 WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
Gal Pressman375ef2b2017-09-17 13:43:58 +03004782 netdev_reg_state(dev), ##args)
4783
Joe Perchesb3d95c52010-02-09 11:49:49 +00004784/* netif printk helpers, similar to netdev_printk */
4785
4786#define netif_printk(priv, type, level, dev, fmt, args...) \
4787do { \
4788 if (netif_msg_##type(priv)) \
4789 netdev_printk(level, (dev), fmt, ##args); \
4790} while (0)
4791
Joe Perchesf45f4322010-06-27 01:02:36 +00004792#define netif_level(level, priv, type, dev, fmt, args...) \
4793do { \
4794 if (netif_msg_##type(priv)) \
4795 netdev_##level(dev, fmt, ##args); \
4796} while (0)
4797
Joe Perchesb3d95c52010-02-09 11:49:49 +00004798#define netif_emerg(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00004799 netif_level(emerg, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00004800#define netif_alert(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00004801 netif_level(alert, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00004802#define netif_crit(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00004803 netif_level(crit, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00004804#define netif_err(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00004805 netif_level(err, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00004806#define netif_warn(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00004807 netif_level(warn, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00004808#define netif_notice(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00004809 netif_level(notice, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00004810#define netif_info(priv, type, dev, fmt, args...) \
Joe Perchesf45f4322010-06-27 01:02:36 +00004811 netif_level(info, priv, type, dev, fmt, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00004812
Joe Perches0053ea92012-05-30 07:43:34 +00004813#if defined(CONFIG_DYNAMIC_DEBUG)
Joe Perchesb3d95c52010-02-09 11:49:49 +00004814#define netif_dbg(priv, type, netdev, format, args...) \
4815do { \
4816 if (netif_msg_##type(priv)) \
Jason Baronb5fb0a02011-08-11 14:36:53 -04004817 dynamic_netdev_dbg(netdev, format, ##args); \
Joe Perchesb3d95c52010-02-09 11:49:49 +00004818} while (0)
Joe Perches0053ea92012-05-30 07:43:34 +00004819#elif defined(DEBUG)
4820#define netif_dbg(priv, type, dev, format, args...) \
4821 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
Joe Perchesb3d95c52010-02-09 11:49:49 +00004822#else
4823#define netif_dbg(priv, type, dev, format, args...) \
4824({ \
4825 if (0) \
4826 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
4827 0; \
4828})
4829#endif
4830
Edward Creef617f272017-01-27 15:02:26 +00004831/* if @cond then downgrade to debug, else print at @level */
4832#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
4833 do { \
4834 if (cond) \
4835 netif_dbg(priv, type, netdev, fmt, ##args); \
4836 else \
4837 netif_ ## level(priv, type, netdev, fmt, ##args); \
4838 } while (0)
4839
Joe Perchesb3d95c52010-02-09 11:49:49 +00004840#if defined(VERBOSE_DEBUG)
Ben Hutchingsbcfcc452010-07-02 07:08:44 +00004841#define netif_vdbg netif_dbg
Joe Perchesb3d95c52010-02-09 11:49:49 +00004842#else
4843#define netif_vdbg(priv, type, dev, format, args...) \
4844({ \
4845 if (0) \
Ben Hutchingsa4ed89c2010-05-18 06:56:32 +00004846 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
Joe Perchesb3d95c52010-02-09 11:49:49 +00004847 0; \
4848})
4849#endif
Joe Perches571ba422010-02-09 11:49:47 +00004850
Cong Wang900ff8c2013-02-18 19:20:33 +00004851/*
4852 * The list of packet types we will receive (as opposed to discard)
4853 * and the routines to invoke.
4854 *
4855 * Why 16. Because with 16 the only overlap we get on a hash of the
4856 * low nibble of the protocol value is RARP/SNAP/X.25.
4857 *
Cong Wang900ff8c2013-02-18 19:20:33 +00004858 * 0800 IP
Cong Wang900ff8c2013-02-18 19:20:33 +00004859 * 0001 802.3
4860 * 0002 AX.25
4861 * 0004 802.2
4862 * 8035 RARP
4863 * 0005 SNAP
4864 * 0805 X.25
4865 * 0806 ARP
4866 * 8137 IPX
4867 * 0009 Localtalk
4868 * 86DD IPv6
4869 */
4870#define PTYPE_HASH_SIZE (16)
4871#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
4872
Mahesh Bandewar4de83b82019-07-01 14:38:49 -07004873extern struct net_device *blackhole_netdev;
4874
Jiri Pirko385a1542009-05-27 15:48:07 -07004875#endif /* _LINUX_NETDEVICE_H */