#ifndef __LINUX_RTNETLINK_H
#define __LINUX_RTNETLINK_H


#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <uapi/linux/rtnetlink.h>

extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
			u32 group, struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
			      u32 id, long expires, u32 error);

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
				       unsigned change, gfp_t flags);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
		       gfp_t flags);


/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
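
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * typical caller takes RTNL around a network configuration change, e.g.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, new_mtu);
 *	rtnl_unlock();
 *
 * where "dev", "new_mtu" and "err" are hypothetical locals. rtnl_trylock()
 * suits paths that must not block waiting for the mutex, and
 * rtnl_is_locked() only reports the lock state.
 */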

extern wait_queue_head_t netdev_unregistering_wq;
extern struct mutex net_mutex;

#ifdef CONFIG_PROVE_LOCKING
extern bool lockdep_rtnl_is_held(void);
#else
static inline bool lockdep_rtnl_is_held(void)
{
	return true;
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */

/**
 * rcu_dereference_rtnl - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 *
 * Do an rcu_dereference(p), but check that the caller either holds
 * rcu_read_lock() or RTNL. Note: please prefer rtnl_dereference() or
 * rcu_dereference().
 */
#define rcu_dereference_rtnl(p)					\
	rcu_dereference_check(p, lockdep_rtnl_is_held())
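
/*
 * Usage sketch (illustrative only): rcu_dereference_rtnl() is for readers
 * that may run under either rcu_read_lock() or RTNL. The local variable
 * below is hypothetical; dev->ingress_queue is the same RCU-managed field
 * used by dev_ingress_queue() later in this header:
 *
 *	struct netdev_queue *q;
 *
 *	rcu_read_lock();
 *	q = rcu_dereference_rtnl(dev->ingress_queue);
 *	...
 *	rcu_read_unlock();
 */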

/**
 * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereference
 *
 * Do an rcu_dereference_bh(p), but check that the caller either holds
 * rcu_read_lock_bh() or RTNL. Note: please prefer rtnl_dereference() or
 * rcu_dereference_bh().
 */
#define rcu_dereference_bh_rtnl(p)				\
	rcu_dereference_bh_check(p, lockdep_rtnl_is_held())

/**
 * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
 * @p: The pointer to read, prior to dereferencing
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
 * the caller holds RTNL.
 */
#define rtnl_dereference(p)					\
	rcu_dereference_protected(p, lockdep_rtnl_is_held())
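
/*
 * Usage sketch (illustrative only): on the update side, where RTNL is
 * already held, rtnl_dereference() fetches the current pointer and
 * rcu_assign_pointer() publishes the replacement; "old" and "new" are
 * hypothetical locals:
 *
 *	ASSERT_RTNL();
 *	old = rtnl_dereference(dev->ingress_queue);
 *	rcu_assign_pointer(dev->ingress_queue, new);
 */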

static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
	return rtnl_dereference(dev->ingress_queue);
}

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);

#ifdef CONFIG_NET_INGRESS
void net_inc_ingress_queue(void);
void net_dec_ingress_queue(void);
#endif

#ifdef CONFIG_NET_EGRESS
void net_inc_egress_queue(void);
void net_dec_egress_queue(void);
#endif

void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);

#define ASSERT_RTNL() do { \
	if (unlikely(!rtnl_is_locked())) { \
		printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
		       __FILE__,  __LINE__); \
		dump_stack(); \
	} \
} while(0)
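
/*
 * Usage sketch (illustrative only): helpers that require the caller to
 * hold RTNL conventionally open with ASSERT_RTNL(), so a missing lock is
 * reported with a stack dump (the macro does not stop execution); the
 * function below is hypothetical:
 *
 *	static int example_set_config(struct net_device *dev)
 *	{
 *		ASSERT_RTNL();
 *		...
 *	}
 */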

extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct net_device *dev,
			     struct net_device *filter_dev,
			     int *idx);
extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
			    struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr,
			    u16 vid,
			    u16 flags);
extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
			    struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr,
			    u16 vid);

extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev, u16 mode,
				   u32 flags, u32 mask, int nlflags,
				   u32 filter_mask,
				   int (*vlan_fill)(struct sk_buff *skb,
						    struct net_device *dev,
						    u32 filter_mask));
#endif /* __LINUX_RTNETLINK_H */