/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Operations on the network namespace
 */
#ifndef __NET_NET_NAMESPACE_H
#define __NET_NET_NAMESPACE_H

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/uidgid.h>

#include <net/flow.h>
#include <net/netns/core.h>
#include <net/netns/mib.h>
#include <net/netns/unix.h>
#include <net/netns/packet.h>
#include <net/netns/ipv4.h>
#include <net/netns/ipv6.h>
#include <net/netns/nexthop.h>
#include <net/netns/ieee802154_6lowpan.h>
#include <net/netns/sctp.h>
#include <net/netns/netfilter.h>
#include <net/netns/x_tables.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
#include <net/netns/can.h>
#include <net/netns/xdp.h>
#include <net/netns/bpf.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>

struct user_namespace;
struct proc_dir_entry;
struct net_device;
struct sock;
struct ctl_table_header;
struct net_generic;
struct uevent_sock;
struct netns_ipvs;
struct bpf_prog;

#define NETDEV_HASHBITS    8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

struct net {
	/* The first cache line can be dirtied often.
	 * Do not place read-mostly fields here.
	 */
	refcount_t		passive;	/* To decide when the network
						 * namespace should be freed.
						 */
	spinlock_t		rules_mod_lock;

	unsigned int		dev_unreg_count;

	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
	int			ifindex;

	spinlock_t		nsid_lock;
	atomic_t		fnhe_genid;

	struct list_head	list;		/* list of network namespaces */
	struct list_head	exit_list;	/* Used to link the net into a
						 * list, either to call pernet
						 * exit methods on a dead net
						 * (pernet_ops_rwsem read locked)
						 * or to unregister pernet ops
						 * (pernet_ops_rwsem write locked).
						 */
	struct llist_node	cleanup_list;	/* namespaces on death row */

#ifdef CONFIG_KEYS
	struct key_tag		*key_domain;	/* Key domain of operation tag */
#endif
	struct user_namespace	*user_ns;	/* Owning user namespace */
	struct ucounts		*ucounts;
	struct idr		netns_ids;

	struct ns_common	ns;

	struct list_head	dev_base_head;
	struct proc_dir_entry	*proc_net;
	struct proc_dir_entry	*proc_net_stat;

#ifdef CONFIG_SYSCTL
	struct ctl_table_set	sysctls;
#endif

	struct sock		*rtnl;		/* rtnetlink socket */
	struct sock		*genl_sock;

	struct uevent_sock	*uevent_sock;	/* uevent socket */

	struct hlist_head	*dev_name_head;
	struct hlist_head	*dev_index_head;
	struct raw_notifier_head	netdev_chain;
	/* Note that @hash_mix can be read millions of times per second,
	 * it is critical that it is on a read_mostly cache line.
	 */
	u32			hash_mix;

	struct net_device	*loopback_dev;	/* The loopback */

	/* core fib_rules */
	struct list_head	rules_ops;

	struct netns_core	core;
	struct netns_mib	mib;
	struct netns_packet	packet;
	struct netns_unix	unx;
	struct netns_nexthop	nexthop;
	struct netns_ipv4	ipv4;
#if IS_ENABLED(CONFIG_IPV6)
	struct netns_ipv6	ipv6;
#endif
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
	struct netns_ieee802154_lowpan	ieee802154_lowpan;
#endif
#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
	struct netns_sctp	sctp;
#endif
#ifdef CONFIG_NETFILTER
	struct netns_nf		nf;
	struct netns_xt		xt;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct netns_ct		ct;
#endif
#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
	struct netns_nftables	nft;
#endif
#endif
#ifdef CONFIG_WEXT_CORE
	struct sk_buff_head	wext_nlevents;
#endif
	struct net_generic __rcu	*gen;

	/* Used to store attached BPF programs */
	struct netns_bpf	bpf;

	/* Note: following structs are cache line aligned */
#ifdef CONFIG_XFRM
	struct netns_xfrm	xfrm;
#endif

	u64			net_cookie;	/* written once */

#if IS_ENABLED(CONFIG_IP_VS)
	struct netns_ipvs	*ipvs;
#endif
#if IS_ENABLED(CONFIG_MPLS)
	struct netns_mpls	mpls;
#endif
#if IS_ENABLED(CONFIG_CAN)
	struct netns_can	can;
#endif
#ifdef CONFIG_XDP_SOCKETS
	struct netns_xdp	xdp;
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	struct sock		*crypto_nlsk;
#endif
	struct sock		*diag_nlsk;
} __randomize_layout;

#include <linux/seq_file_net.h>

/* Init's network namespace */
extern struct net init_net;

#ifdef CONFIG_NET_NS
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
			struct net *old_net);

void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);

void net_ns_barrier(void);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
static inline struct net *copy_net_ns(unsigned long flags,
	struct user_namespace *user_ns, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);
	return old_net;
}

static inline void net_ns_get_ownership(const struct net *net,
					kuid_t *uid, kgid_t *gid)
{
	*uid = GLOBAL_ROOT_UID;
	*gid = GLOBAL_ROOT_GID;
}

static inline void net_ns_barrier(void) {}
#endif /* CONFIG_NET_NS */

extern struct list_head net_namespace_list;

struct net *get_net_ns_by_pid(pid_t pid);
struct net *get_net_ns_by_fd(int fd);

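/*
 * Usage note (illustrative sketch, not taken from this header): both helpers
 * above return a referenced namespace or an ERR_PTR() value, so callers are
 * expected to check the result and drop the reference with put_net() when
 * done.  "fd" is a hypothetical variable; IS_ERR()/PTR_ERR() come from
 * <linux/err.h>.
 *
 *	struct net *net = get_net_ns_by_fd(fd);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	put_net(net);
 */
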
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
void ipx_unregister_sysctl(void);
#else
#define ipx_register_sysctl()
#define ipx_unregister_sysctl()
#endif

#ifdef CONFIG_NET_NS
void __put_net(struct net *net);

static inline struct net *get_net(struct net *net)
{
	refcount_inc(&net->ns.count);
	return net;
}

static inline struct net *maybe_get_net(struct net *net)
{
	/* Used when we know struct net exists but we
	 * aren't guaranteed a previous reference count
	 * exists.  If the reference count is zero this
	 * function fails and returns NULL.
	 */
	if (!refcount_inc_not_zero(&net->ns.count))
		net = NULL;
	return net;
}

static inline void put_net(struct net *net)
{
	if (refcount_dec_and_test(&net->ns.count))
		__put_net(net);
}

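/*
 * Illustrative sketch (hypothetical "foo" object, not part of this header):
 * code that stores a struct net pointer for later use takes a long-lived
 * reference with get_net() and releases it with put_net() when the object
 * goes away.  kzalloc()/kfree() come from <linux/slab.h>.
 *
 *	struct foo {
 *		struct net *net;
 *	};
 *
 *	static struct foo *foo_create(struct net *net)
 *	{
 *		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (!foo)
 *			return NULL;
 *		foo->net = get_net(net);
 *		return foo;
 *	}
 *
 *	static void foo_destroy(struct foo *foo)
 *	{
 *		put_net(foo->net);
 *		kfree(foo);
 *	}
 *
 * maybe_get_net() is the variant to use when only a weak pointer is held
 * and the namespace may already be going away.
 */
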
static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return net1 == net2;
}

static inline int check_net(const struct net *net)
{
	return refcount_read(&net->ns.count) != 0;
}

void net_drop_ns(void *);

#else

static inline struct net *get_net(struct net *net)
{
	return net;
}

static inline void put_net(struct net *net)
{
}

static inline struct net *maybe_get_net(struct net *net)
{
	return net;
}

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return 1;
}

static inline int check_net(const struct net *net)
{
	return 1;
}

#define net_drop_ns NULL
#endif

typedef struct {
#ifdef CONFIG_NET_NS
	struct net *net;
#endif
} possible_net_t;

static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
	pnet->net = net;
#endif
}

static inline struct net *read_pnet(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
	return pnet->net;
#else
	return &init_net;
#endif
}

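/*
 * Illustrative sketch (hypothetical "bar" structure): possible_net_t lets a
 * structure remember its namespace without growing when CONFIG_NET_NS is
 * disabled.  Writers use write_pnet(), readers use read_pnet(); with
 * CONFIG_NET_NS=n, read_pnet() simply yields &init_net.  Note that
 * write_pnet() does not take a reference; use get_net() if the pointer can
 * outlive the namespace.
 *
 *	struct bar {
 *		possible_net_t pnet;
 *	};
 *
 *	static void bar_init(struct bar *bar, struct net *net)
 *	{
 *		write_pnet(&bar->pnet, net);
 *	}
 *
 *	static bool bar_in_net(const struct bar *bar, const struct net *net)
 *	{
 *		return net_eq(read_pnet(&bar->pnet), net);
 *	}
 */
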
/* Protected by net_rwsem */
#define for_each_net(VAR)				\
	list_for_each_entry(VAR, &net_namespace_list, list)
#define for_each_net_continue_reverse(VAR)		\
	list_for_each_entry_continue_reverse(VAR, &net_namespace_list, list)
#define for_each_net_rcu(VAR)				\
	list_for_each_entry_rcu(VAR, &net_namespace_list, list)

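/*
 * Illustrative sketch: a walker that cannot take net_rwsem can use the RCU
 * variant, provided the loop body does not sleep and does not rely on the
 * net staying alive after the RCU critical section (use maybe_get_net() for
 * that).  rcu_read_lock()/rcu_read_unlock() come from <linux/rcupdate.h>.
 *
 *	struct net *net;
 *
 *	rcu_read_lock();
 *	for_each_net_rcu(net)
 *		inspect_read_mostly_state(net);
 *	rcu_read_unlock();
 *
 * inspect_read_mostly_state() is a hypothetical helper used only for
 * illustration.
 */
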
#ifdef CONFIG_NET_NS
#define __net_init
#define __net_exit
#define __net_initdata
#define __net_initconst
#else
#define __net_init	__init
#define __net_exit	__ref
#define __net_initdata	__initdata
#define __net_initconst	__initconst
#endif

int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
int peernet2id(const struct net *net, struct net *peer);
bool peernet_has_id(const struct net *net, struct net *peer);
struct net *get_net_ns_by_id(const struct net *net, int id);

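/*
 * Usage note (illustrative sketch): nsids are always relative to a "current"
 * namespace, typically when a peer namespace has to be referred to in a
 * netlink message built in @net.  peernet2id_alloc() may allocate an id and
 * therefore takes a gfp mask, while peernet2id() only looks up an existing
 * one.  GFP_KERNEL comes from <linux/gfp.h>.
 *
 *	int id = peernet2id_alloc(net, peer, GFP_KERNEL);
 */
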
struct pernet_operations {
	struct list_head list;
	/*
	 * The methods below are called without any exclusive locks.
	 * More than one net may be constructed and destructed
	 * in parallel on several cpus.  Every pernet_operations
	 * implementation has to keep all other pernet_operations in
	 * mind and introduce its own locking if they share common
	 * resources.
	 *
	 * The only time they are called with an exclusive lock is
	 * from register_pernet_subsys(), unregister_pernet_subsys(),
	 * register_pernet_device() and unregister_pernet_device().
	 *
	 * Exit methods using blocking RCU primitives, such as
	 * synchronize_rcu(), should be implemented via exit_batch.
	 * Then, destruction of a group of nets requires a single
	 * synchronize_rcu() related to these pernet_operations,
	 * instead of a separate synchronize_rcu() for every net.
	 * Please avoid synchronize_rcu() altogether where possible.
	 *
	 * Note that a combination of pre_exit() and exit() can
	 * be used, since a synchronize_rcu() is guaranteed between
	 * the calls.
	 */
	int (*init)(struct net *net);
	void (*pre_exit)(struct net *net);
	void (*exit)(struct net *net);
	void (*exit_batch)(struct list_head *net_exit_list);
	unsigned int *id;
	size_t size;
};

/*
 * Use these carefully.  If you implement a network device and it needs
 * per network namespace operations, use device pernet operations;
 * otherwise use pernet subsys operations.
 *
 * Network interfaces need to be removed from a dying netns _before_
 * subsys notifiers can be called, as most of the network code cleanup
 * (which is done from subsys notifiers) runs with the assumption that
 * dev_remove_pack has been called so no new packets will arrive during
 * and after the cleanup functions have been called.  dev_remove_pack
 * is not per namespace, so instead the guarantee of no more packets
 * arriving in a network namespace is provided by ensuring that all
 * network devices and all sockets have left the network namespace
 * before the cleanup methods are called.
 *
 * For the longest time the ipv4 icmp code was registered as a pernet
 * device, which caused kernel oopses and panics during network
 * namespace cleanup.  So please don't get this wrong.
 */
int register_pernet_subsys(struct pernet_operations *);
void unregister_pernet_subsys(struct pernet_operations *);
int register_pernet_device(struct pernet_operations *);
void unregister_pernet_device(struct pernet_operations *);

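/*
 * Illustrative sketch (hypothetical "foo" names): a subsystem keeping private
 * per-namespace state usually sets .id and .size so the core allocates that
 * state for every net and makes it reachable via net_generic(), declared in
 * <net/netns/generic.h>.  The storage allocated for .size is freed by the
 * core, so this minimal example needs no .exit; anything the init method
 * allocates itself would be released from .exit, .pre_exit or .exit_batch.
 *
 *	static unsigned int foo_net_id;
 *
 *	struct foo_net {
 *		struct list_head entries;
 *	};
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		INIT_LIST_HEAD(&fn->entries);
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 * register_pernet_subsys(&foo_net_ops) and unregister_pernet_subsys() then
 * pair up in the module's init and exit paths.
 */
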
struct ctl_table;

#ifdef CONFIG_SYSCTL
int net_sysctl_init(void);
struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
					     struct ctl_table *table);
void unregister_net_sysctl_table(struct ctl_table_header *header);
#else
static inline int net_sysctl_init(void) { return 0; }
static inline struct ctl_table_header *register_net_sysctl(struct net *net,
	const char *path, struct ctl_table *table)
{
	return NULL;
}
static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
{
}
#endif

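/*
 * Illustrative sketch (hypothetical "foo" names and "net/foo" path):
 * per-namespace sysctls are registered against a specific struct net,
 * typically from a pernet init method, and removed again with
 * unregister_net_sysctl_table() in the matching exit method.  Real users
 * usually kmemdup() the table per net so that .data can point at
 * per-namespace storage; proc_dointvec comes from <linux/sysctl.h>.
 *
 *	static int foo_sysctl_value;
 *
 *	static struct ctl_table foo_sysctl_table[] = {
 *		{
 *			.procname	= "foo_enabled",
 *			.data		= &foo_sysctl_value,
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= proc_dointvec,
 *		},
 *		{ }
 *	};
 *
 *	hdr = register_net_sysctl(net, "net/foo", foo_sysctl_table);
 *	if (!hdr)
 *		return -ENOMEM;
 */
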
static inline int rt_genid_ipv4(const struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int rt_genid_ipv6(const struct net *net)
{
	return atomic_read(&net->ipv6.fib6_sernum);
}
#endif

static inline void rt_genid_bump_ipv4(struct net *net)
{
	atomic_inc(&net->ipv4.rt_genid);
}

extern void (*__fib6_flush_trees)(struct net *net);
static inline void rt_genid_bump_ipv6(struct net *net)
{
	if (__fib6_flush_trees)
		__fib6_flush_trees(net);
}

#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
static inline struct netns_ieee802154_lowpan *
net_ieee802154_lowpan(struct net *net)
{
	return &net->ieee802154_lowpan;
}
#endif

/* For callers who don't really care about whether it's IPv4 or IPv6 */
static inline void rt_genid_bump_all(struct net *net)
{
	rt_genid_bump_ipv4(net);
	rt_genid_bump_ipv6(net);
}

static inline int fnhe_genid(const struct net *net)
{
	return atomic_read(&net->fnhe_genid);
}

static inline void fnhe_genid_bump(struct net *net)
{
	atomic_inc(&net->fnhe_genid);
}

#endif /* __NET_NET_NAMESPACE_H */