/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_RTNETLINK_H
#define __LINUX_RTNETLINK_H


#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <uapi/linux/rtnetlink.h>

extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
			u32 group, struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
			      u32 id, long expires, u32 error);

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
			 gfp_t flags, int *new_nsid, int new_ifindex);
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
				       unsigned change, u32 event,
				       gfp_t flags, int *new_nsid,
				       int new_ifindex);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
		       gfp_t flags);
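
/*
 * Illustrative sketch (not part of this header): a typical caller notifies
 * userspace of a device change by sending an RTM_NEWLINK message, where
 * "changes" stands for a bitmask of the IFF_* flags that were modified:
 *
 *	rtmsg_ifinfo(RTM_NEWLINK, dev, changes, GFP_KERNEL);
 *
 * The rtmsg_ifinfo_build_skb()/rtmsg_ifinfo_send() pair lets a caller build
 * the message first and defer the actual notification.
 */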

/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
extern int rtnl_lock_killable(void);
extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
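
/*
 * Illustrative sketch (not part of this header): configuration changes are
 * normally wrapped in an RTNL critical section, e.g.
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, newname);
 *	rtnl_unlock();
 *
 * Callers that must not sleep waiting for the lock can use rtnl_trylock()
 * and back off (or retry later) when it returns 0.
 */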

extern wait_queue_head_t netdev_unregistering_wq;
extern struct rw_semaphore pernet_ops_rwsem;
extern struct rw_semaphore net_rwsem;

#ifdef CONFIG_PROVE_LOCKING
extern bool lockdep_rtnl_is_held(void);
#else
static inline bool lockdep_rtnl_is_held(void)
{
	return true;
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */

/**
 * rcu_dereference_rtnl - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 *
 * Do an rcu_dereference(p), but check that the caller either holds
 * rcu_read_lock() or RTNL.  Note: please prefer rtnl_dereference() or
 * rcu_dereference() where possible.
 */
#define rcu_dereference_rtnl(p)					\
	rcu_dereference_check(p, lockdep_rtnl_is_held())
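
/*
 * Illustrative sketch (not part of this header): rcu_dereference_rtnl() is
 * meant for code that may run either under rcu_read_lock() or with the RTNL
 * held, e.g. a helper that reads the __rcu pointer dev->ingress_queue from
 * both kinds of callers:
 *
 *	struct netdev_queue *q = rcu_dereference_rtnl(dev->ingress_queue);
 */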

/**
 * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 *
 * Do an rcu_dereference_bh(p), but check that the caller either holds
 * rcu_read_lock_bh() or RTNL.  Note: please prefer rtnl_dereference() or
 * rcu_dereference_bh() where possible.
 */
#define rcu_dereference_bh_rtnl(p)			\
	rcu_dereference_bh_check(p, lockdep_rtnl_is_held())

/**
 * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
 * @p: The pointer to read, prior to dereferencing
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * the READ_ONCE(), because caller holds RTNL.
 */
#define rtnl_dereference(p)					\
	rcu_dereference_protected(p, lockdep_rtnl_is_held())

static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
	return rtnl_dereference(dev->ingress_queue);
}

static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev)
{
	return rcu_dereference(dev->ingress_queue);
}
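
/*
 * dev_ingress_queue() is meant for configuration paths that already hold the
 * RTNL, while dev_ingress_queue_rcu() is for fast-path readers running under
 * rcu_read_lock().  Illustrative sketch (not part of this header):
 *
 *	rcu_read_lock();
 *	q = dev_ingress_queue_rcu(dev);
 *	if (q)
 *		... use q ...
 *	rcu_read_unlock();
 */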

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);

#ifdef CONFIG_NET_INGRESS
void net_inc_ingress_queue(void);
void net_dec_ingress_queue(void);
#endif

#ifdef CONFIG_NET_EGRESS
void net_inc_egress_queue(void);
void net_dec_egress_queue(void);
#endif

void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);

#define ASSERT_RTNL() \
	WARN_ONCE(!rtnl_is_locked(), \
		  "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
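
/*
 * Illustrative sketch (not part of this header; example_set_config() is a
 * made-up name): functions that require the caller to hold the RTNL usually
 * assert it on entry:
 *
 *	static int example_set_config(struct net_device *dev)
 *	{
 *		ASSERT_RTNL();
 *		...
 *	}
 */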

extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct net_device *dev,
			     struct net_device *filter_dev,
			     int *idx);
extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
			    struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr,
			    u16 vid,
			    u16 flags);
extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
			    struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr,
			    u16 vid);
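
/*
 * The ndo_dflt_fdb_*() helpers are the software-FDB fallbacks the rtnetlink
 * core uses when a device provides no .ndo_fdb_add/.ndo_fdb_del/.ndo_fdb_dump
 * of its own.  Illustrative sketch (not part of this header; foo_fdb_add()
 * and is_offloadable() are made-up names): a driver handler may also call
 * the default explicitly for addresses it does not offload:
 *
 *	static int foo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 *			       struct net_device *dev,
 *			       const unsigned char *addr, u16 vid, u16 flags)
 *	{
 *		if (!is_offloadable(addr))
 *			return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
 *		...
 *	}
 */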

extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev, u16 mode,
				   u32 flags, u32 mask, int nlflags,
				   u32 filter_mask,
				   int (*vlan_fill)(struct sk_buff *skb,
						    struct net_device *dev,
						    u32 filter_mask));
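
/*
 * Illustrative sketch (not part of this header): a driver with an embedded
 * bridge can implement .ndo_bridge_getlink by letting this helper build the
 * RTM_NEWLINK reply with the device's current mode, e.g.
 *
 *	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, BRIDGE_MODE_VEB,
 *				       0, 0, nlflags, filter_mask, NULL);
 */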
#endif /* __LINUX_RTNETLINK_H */