/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */
#define NET_XMIT_MASK		0xFFFF	/* qdisc flags in net/sch_generic.h */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
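
/*
 * Usage sketch (illustrative, not part of this header): since NET_XMIT_CN
 * does not imply the skb was lost, callers typically fold it into success:
 *
 *	err = net_xmit_eval(dev_queue_xmit(skb));
 *	// err is 0 for NET_XMIT_SUCCESS and NET_XMIT_CN, otherwise the
 *	// original NET_XMIT_* code (e.g. NET_XMIT_DROP).
 */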

#endif

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK		0	/* driver took care of packet */
#define NETDEV_TX_BUSY		1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED	-1	/* driver tx lock was already taken */
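
/*
 * Illustrative sketch of how a driver's hard_start_xmit handler uses these
 * codes (the mydrv_* names are hypothetical, not part of this header):
 *
 *	static int mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		if (!mydrv_tx_ring_has_room(priv))
 *			return NETDEV_TX_BUSY;	// core will retry the skb later
 *
 *		mydrv_post_to_ring(priv, skb);	// hardware now owns the skb
 *		dev->trans_start = jiffies;
 *		return NETDEV_TX_OK;
 *	}
 */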

#ifdef __KERNEL__

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif /* __KERNEL__ */

/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
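
/*
 * Illustrative sketch (hypothetical driver, not part of this header): a
 * driver bumps these counters on its datapath and can simply return the
 * stats block embedded in struct net_device from its get_stats hook:
 *
 *	static struct net_device_stats *mydrv_get_stats(struct net_device *dev)
 *	{
 *		return &dev->stats;	// updated in rx/tx paths via
 *					// dev->stats.rx_packets++, etc.
 *	}
 */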


/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry			*/
	atomic_t	hh_refcnt;	/* number of users		*/
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
					 * NOTE: For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
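
/*
 * Illustrative sketch (not part of this header): a sender allocating an
 * skb typically reserves link-layer headroom with these macros before
 * filling in the payload:
 *
 *	skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */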

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
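
/*
 * Illustrative sketch of the scheduling/completion protocol above, with a
 * hypothetical driver (the mydrv_* names are not part of this header).
 * The interrupt handler schedules the poll; the poll routine completes
 * when it runs out of work:
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;
 *
 *		mydrv_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
 *		int work_done = mydrv_rx(priv, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			mydrv_enable_rx_irq(priv);
 *		}
 *		return work_done;
 *	}
 */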

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t
{
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_FROZEN,
};

struct netdev_queue {
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	spinlock_t		_xmit_lock;
	int			xmit_lock_owner;
	struct Qdisc		*qdisc_sleeping;
} ____cacheline_aligned_in_smp;

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. e.g. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;


	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */


	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/
	int			uc_promisc;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data	*/
	void			*ip6_ptr;	/* IPv6 specific data	*/
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data	*/
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

	struct netdev_queue	rx_queue;

	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device  */
	unsigned int		real_num_tx_queues;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;
/*
 * One part is mostly used on xmit path (device)
 */
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines.	*/
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

	u16			(*select_queue)(struct net_device *dev,
						struct sk_buff *skb);

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	void			*ml_priv;

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;
	/* GARP */
	struct garp_port	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* VLAN feature mask */
	unsigned long		vlan_features;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32
#define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
	return dev->nd_net;
#else
	return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ((sizeof(struct net_device)
			       + NETDEV_ALIGN_CONST)
			      & ~NETDEV_ALIGN_CONST);
}
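
/*
 * Illustrative sketch (hypothetical driver, not part of this header): the
 * private area lives directly behind the aligned struct net_device, so a
 * driver allocates both at once and reaches its state via netdev_priv():
 *
 *	struct mydrv_priv { struct napi_struct napi; ... };
 *
 *	dev = alloc_netdev(sizeof(struct mydrv_priv), "myeth%d", mydrv_setup);
 *	priv = netdev_priv(dev);
 */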

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
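
/*
 * Illustrative sketch (hypothetical driver): a driver registers its poll
 * routine at probe time and pairs napi_enable()/napi_disable() with its
 * open/stop handlers:
 *
 *	netif_napi_add(dev, &priv->napi, mydrv_poll, 64);	// at probe
 *
 *	napi_enable(&priv->napi);	// in dev->open()
 *	napi_disable(&priv->napi);	// in dev->stop()
 */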

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
static inline void netif_napi_del(struct napi_struct *napi)
{
#ifdef CONFIG_NETPOLL
	list_del(&napi->dev_list);
#endif
}

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};
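
/*
 * Illustrative sketch (hypothetical handler, not part of this header): a
 * protocol registers a receive hook for one ethertype with dev_add_pack():
 *
 *	static struct packet_type myproto_pt = {
 *		.type = __constant_htons(0x88b5),	// example ethertype
 *		.func = myproto_rcv,			// hypothetical rcv handler
 *	};
 *
 *	dev_add_pack(&myproto_pt);	// e.g. in module init
 *	dev_remove_pack(&myproto_pt);	// on exit
 */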

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t dev_base_lock;		/* Device list lock */


#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)	\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}
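
/*
 * Illustrative sketch (not part of this header): walking all devices in a
 * namespace under the device list lock:
 *
 *	struct net_device *d;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, d)
 *		printk(KERN_DEBUG "%s\n", d->name);
 *	read_unlock(&dev_base_lock);
 */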

extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
					   unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
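
/*
 * Illustrative sketch (not part of this header): after reserving headroom
 * (see LL_RESERVED_SPACE above), a sender pushes the link-layer header via
 * the device's header_ops:
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP,
 *			    dest_hw, dev->dev_addr, skb->len) < 0)
 *		goto drop;	// header creation failed
 */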

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct Qdisc		*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data,softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop the transmit queue
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
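
/*
 * Illustrative flow-control sketch (hypothetical driver): the xmit path
 * stops the queue when its ring is nearly full, and the tx-completion
 * interrupt wakes it once descriptors are reclaimed:
 *
 *	// in mydrv_start_xmit(), after queueing the skb:
 *	if (mydrv_tx_ring_free(priv) < MAX_SKB_FRAGS + 1)
 *		netif_stop_queue(dev);
 *
 *	// in the tx-completion handler, after reclaiming descriptors:
 *	if (netif_queue_stopped(dev) &&
 *	    mydrv_tx_ring_free(priv) >= MAX_SKB_FRAGS + 1)
 *		netif_wake_queue(dev);
 */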

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flow-blocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__QUEUE_STATE_XOFF, &txq->state);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return (dev->num_tx_queues > 1);
}
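
/*
 * Illustrative sketch (hypothetical multiqueue driver): per-ring flow
 * control mirrors the single-queue pattern above, keyed by the skb's
 * queue mapping:
 *
 *	u16 q = skb_get_queue_mapping(skb);
 *
 *	if (mydrv_ring_full(priv, q))
 *		netif_stop_subqueue(dev, q);	// in the xmit path
 *
 *	netif_wake_subqueue(dev, q);		// after tx completion on ring q
 */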

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern void		netif_nit_deliver(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev,
					    struct netdev_queue *txq);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265
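/* Editorial sketch, not part of this header: code that stashes a
 * net_device pointer beyond the current protected section must pin it
 * with dev_hold() and balance with dev_put() when done ('ctx' is a
 * hypothetical container):
 *
 *	dev_hold(dev);		// refcnt keeps dev from being freed
 *	ctx->dev = dev;
 *	...
 *	dev_put(ctx->dev);	// release when finished with the device
 */
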
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller who is
 * responsible for serialization of these calls.
 *
 * The name 'carrier' is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 *	Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

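/* Editorial sketch, not part of this header: a hypothetical driver's
 * link-status interrupt would report PHY state through the carrier
 * helpers, which in turn drive the watchdog and operstate machinery
 * (mydrv_link_up() is an assumed status read):
 *
 *	if (mydrv_link_up(priv))
 *		netif_carrier_on(priv->netdev);
 *	else
 *		netif_carrier_off(priv->netdev);
 */
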
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 *	Mark device as dormant (as per RFC2863).
 *
 *	The dormant state indicates that the relevant interface is not
 *	actually in a condition to pass packets (i.e., it is not 'up') but is
 *	in a "pending" state, waiting for some external event. For "on-demand"
 *	interfaces, this new state identifies the situation where the
 *	interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 *	Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 *	Check if device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 *	Check if the device's operational state is up.
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 *	Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

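/* Editorial sketch, not part of this header: PCI drivers typically pair
 * detach/attach in their power-management hooks so the stack stops using
 * the hardware while it is powered down (mydrv_* names are assumed):
 *
 *	static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *netdev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(netdev);	// mark absent, stop queues
 *		...				// save state, power down
 *		return 0;
 *	}
 *
 *	// mydrv_resume() would restore the hardware and then call
 *	// netif_device_attach(netdev) to mark it present again.
 */
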
/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}

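/* Editorial sketch, not part of this header: a hypothetical driver maps
 * a module parameter onto msg_enable at probe time, then gates its
 * diagnostics on the per-class test macros above:
 *
 *	static int debug = -1;		// -1 selects the driver defaults
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link is up\n", netdev->name);
 */
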
/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
					 struct napi_struct *napi)
{
	return napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */

static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}

/* Same as netif_rx_complete(), except that local_irq_save(flags)
 * has already been issued.
 */
static inline void __netif_rx_complete(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_complete(napi);
}

/* Remove interface from poll list: it must be in the poll list
 * on the current cpu. This primitive is called by dev->poll() when
 * it completes its work. If the device is not on the poll list at
 * this moment, it is a BUG.
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}

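/* Editorial sketch, not part of this header: the intended flow is that
 * the RX interrupt calls netif_rx_schedule(), and the driver's NAPI poll
 * routine calls netif_rx_complete() once all pending work fits within
 * its budget. mydrv_clean_rx() and mydrv_enable_irq() are hypothetical:
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *priv = container_of(napi,
 *						struct mydrv_priv, napi);
 *		int done = mydrv_clean_rx(priv, budget);
 *
 *		if (done < budget) {
 *			netif_rx_complete(priv->netdev, napi);
 *			mydrv_enable_irq(priv);	// re-arm the RX interrupt
 *		}
 *		return done;
 *	}
 */
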
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 *	Get network device transmit lock and mark all transmit queues frozen.
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here. If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
			__netif_schedule(txq->qdisc);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

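/* Editorial sketch, not part of this header: a hypothetical driver
 * reconfiguration path could freeze all transmit queues around state
 * changes that ->hard_start_xmit() must not observe half-done
 * (mydrv_swap_tx_rings() is an assumed helper):
 *
 *	netif_tx_lock_bh(netdev);	// freeze every TX queue
 *	mydrv_swap_tx_rings(priv);	// rewrite TX state safely
 *	netif_tx_unlock_bh(netdev);	// thaw and reschedule queues
 */
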
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/* These functions live elsewhere (drivers/net/net_init.c), but are related. */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
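/* Editorial sketch, not part of this header: the usual Ethernet driver
 * probe sequence built from the helpers above (struct mydrv_priv is a
 * hypothetical driver-private type):
 *
 *	struct net_device *netdev;
 *	int err;
 *
 *	netdev = alloc_netdev(sizeof(struct mydrv_priv), "eth%d",
 *			      ether_setup);
 *	if (!netdev)
 *		return -ENOMEM;
 *	...					// fill in dev methods
 *	err = register_netdev(netdev);
 *	if (err)
 *		free_netdev(netdev);
 */
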
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_bonding_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern char *netdev_drivername(struct net_device *dev, char *buffer, int len);

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

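/* Editorial sketch, not part of this header: roughly how a transmit path
 * would use the helpers above, segmenting in software only when the
 * device cannot handle the skb's GSO type itself ('drop' is a
 * hypothetical error label):
 *
 *	if (netif_needs_gso(dev, skb)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		...			// queue each segment in turn
 *	}
 */
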
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}

#endif /* __KERNEL__ */

#endif /* _LINUX_NETDEVICE_H */