/*
 * net/tipc/net.c: TIPC network routing code
 *
 * Copyright (c) 1995-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "net.h"
#include "name_distr.h"
#include "subscr.h"
#include "socket.h"
#include "node.h"
#include "bcast.h"
#include "netlink.h"
#include "monitor.h"

/*
 * The TIPC locking policy is designed to ensure a very fine locking
 * granularity, permitting complete parallel access to individual
 * port and node/link instances. The code consists of four major
 * locking domains, each protected by its own disjoint set of locks.
 *
 * 1: The bearer level.
 *    The RTNL lock serializes bearer configuration on the update side,
 *    while the RCU read lock is taken on the read side so that a bearer
 *    instance remains valid on both the transmission and the reception
 *    path. (A guarded sketch of this pattern follows this comment.)
 *
 * 2: The node and link level.
 *    All node instances are kept in two lists, tipc_node_list and
 *    node_htable. Both lists are protected by node_list_lock on the
 *    write side and by the RCU read lock on the read side. Since a node
 *    instance is destroyed only when the TIPC module is removed, at
 *    which point no user can still be accessing it, RCU protection is
 *    needed only while iterating the two lists; accessing a node
 *    instance elsewhere does not require holding the RCU read lock.
 *
 *    In addition, all members of the node structure, including its link
 *    instances, are protected by the node spin lock.
 *
 * 3: The transport level of the protocol.
 *    This consists of the structures port (and its user level
 *    representations, such as user_port and tipc_sock), reference and
 *    tipc_user (port.c, reg.c, socket.c).
 *
 *    This layer has four different locks:
 *    - The tipc_port spin_lock. This protects each port instance from
 *      parallel data access and removal. Since we can not place this
 *      lock in the port itself, it has been placed in the corresponding
 *      reference table entry, which has the same life cycle as the
 *      module. This entry is difficult to access from outside the TIPC
 *      core, however, so a pointer to the lock has been added to the
 *      port instance, to be used for unlocking only.
 *    - A read/write lock protecting the reference table itself (reg.c).
 *      (Nobody uses read-only access to this, so it could just as well
 *      be changed to a spin_lock.)
 *    - A spin lock protecting the registry of kernel/driver users (reg.c).
 *    - A global spin_lock (tipc_port_lock), whose only task is to ensure
 *      consistency where more than one port is involved in an operation,
 *      i.e., when a port is part of a linked list of ports.
 *      There are two such lists; 'port_list', which is used for management,
 *      and 'wait_list', which is used to queue ports during congestion.
 *
 * 4: The name table (name_table.c, name_distr.c, subscription.c)
 *    - There is one big read/write lock (tipc_nametbl_lock) protecting the
 *      overall name table structure. Nothing may be added to or removed
 *      from this structure without holding write access to it.
 *    - There is one local spin_lock per sub_sequence, which can be seen
 *      as a sub-domain of the tipc_nametbl_lock domain. It is used only
 *      for translation operations, and is needed because a translation
 *      steps the root of the 'publication' linked list between each
 *      lookup. It is always used within the scope of a
 *      tipc_nametbl_lock(read).
 *    - A local spin_lock protecting the queue of subscriber events.
 */
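
/* Illustrative sketch only (not built; assumes the bearer_list[] array in
 * struct tipc_net and the mtu field of struct tipc_bearer from core.h and
 * bearer.h): the RCU read-side pattern described in locking domain 1 above.
 * The bearer pointer is dereferenced only inside the rcu_read_lock() section,
 * so it remains valid even if the RTNL-protected configuration path disables
 * the bearer concurrently.
 */
#if 0
static int example_bearer_mtu(struct net *net, u32 bearer_id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bearer *b;
	int mtu = 0;

	rcu_read_lock();
	b = rcu_dereference(tn->bearer_list[bearer_id]);
	if (b)
		mtu = b->mtu;
	rcu_read_unlock();
	return mtu;
}
#endif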

struct tipc_net_work {
	struct work_struct work;
	struct net *net;
	u32 addr;
};

static void tipc_net_finalize(struct net *net, u32 addr);

int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
{
	if (tipc_own_id(net)) {
		pr_info("Cannot configure node identity twice\n");
		return -1;
	}
	pr_info("Started in network mode\n");

	if (node_id)
		tipc_set_node_id(net, node_id);
	if (addr)
		tipc_net_finalize(net, addr);
	return 0;
}

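/* tipc_net_finalize - complete the transition into network mode once the
 * node address is known. The cmpxchg() on tn->node_addr ensures that only
 * the first caller (via tipc_net_init() above or tipc_net_finalize_work()
 * below) performs the transition; a later call finds the address already
 * set and returns early.
 */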
static void tipc_net_finalize(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);

	if (cmpxchg(&tn->node_addr, 0, addr))
		return;
	tipc_set_node_addr(net, addr);
	tipc_named_reinit(net);
	tipc_sk_reinit(net);
	tipc_mon_reinit_self(net);
	tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
			     TIPC_CLUSTER_SCOPE, 0, addr);
}

static void tipc_net_finalize_work(struct work_struct *work)
{
	struct tipc_net_work *fwork;

	fwork = container_of(work, struct tipc_net_work, work);
	tipc_net_finalize(fwork->net, fwork->addr);
	kfree(fwork);
}

void tipc_sched_net_finalize(struct net *net, u32 addr)
{
	struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);

	if (!fwork)
		return;
	INIT_WORK(&fwork->work, tipc_net_finalize_work);
	fwork->net = net;
	fwork->addr = addr;
	schedule_work(&fwork->work);
}
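
/* Illustrative sketch only (not built; the function name is hypothetical):
 * tipc_sched_net_finalize() exists so that finalization can be requested
 * from atomic context (hence the GFP_ATOMIC allocation above), with the
 * actual work deferred to the system workqueue. A caller that learns the
 * node address while handling a received packet would use it roughly like
 * this.
 */
#if 0
static void example_addr_learned(struct net *net, u32 self_addr)
{
	/* Cannot sleep here, so defer the address-dependent
	 * re-initialization to process context.
	 */
	tipc_sched_net_finalize(net, self_addr);
}
#endif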

void tipc_net_stop(struct net *net)
{
	if (!tipc_own_id(net))
		return;

	rtnl_lock();
	tipc_bearer_stop(net);
	tipc_node_stop(net);
	rtnl_unlock();

	pr_info("Left network mode\n");
}

static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u64 *w0 = (u64 *)&tn->node_id[0];
	u64 *w1 = (u64 *)&tn->node_id[8];
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NET_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NET);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
		goto attr_msg_full;
	if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID, *w0, 0))
		goto attr_msg_full;
	if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID_W1, *w1, 0))
		goto attr_msg_full;
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int err;
	int done = cb->args[0];
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	err = __tipc_nl_add_net(net, &msg);
	if (err)
		goto out;

	done = 1;
out:
	cb->args[0] = done;

	return skb->len;
}

int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	int err;

	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);

	if (err)
		return err;

	/* Can't change net id once TIPC has joined a network */
	if (tipc_own_addr(net))
		return -EPERM;

	if (attrs[TIPC_NLA_NET_ID]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
		if (val < 1 || val > 9999)
			return -EINVAL;

		tn->net_id = val;
	}

	if (attrs[TIPC_NLA_NET_ADDR]) {
		u32 addr;

		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
		if (!addr)
			return -EINVAL;
		tn->legacy_addr_format = true;
		tipc_net_init(net, NULL, addr);
	}

	if (attrs[TIPC_NLA_NET_NODEID]) {
		u8 node_id[NODE_ID_LEN];
		u64 *w0 = (u64 *)&node_id[0];
		u64 *w1 = (u64 *)&node_id[8];

		if (!attrs[TIPC_NLA_NET_NODEID_W1])
			return -EINVAL;
		*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
		*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
		tipc_net_init(net, node_id, 0);
	}
	return 0;
}
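
/* Illustrative sketch only (not built; userspace-side, names hypothetical):
 * the 128-bit node identity handled above travels as two u64 netlink
 * attributes, TIPC_NLA_NET_NODEID and TIPC_NLA_NET_NODEID_W1, whose raw
 * payload bytes become bytes 0-7 and 8-15 of node_id[] respectively. A
 * configuration tool would split the identity roughly like this before
 * sending a TIPC_NL_NET_SET request.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void example_split_node_id(const uint8_t id[16],
				  uint64_t *w0, uint64_t *w1)
{
	/* Same byte layout as the kernel side: word 0 carries bytes 0-7,
	 * word 1 carries bytes 8-15 of the 128-bit identity.
	 */
	memcpy(w0, &id[0], 8);
	memcpy(w1, &id[8], 8);
}
#endif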

int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_net_set(skb, info);
	rtnl_unlock();

	return err;
}

static int __tipc_nl_addr_legacy_get(struct net *net, struct tipc_nl_msg *msg)
{
	struct tipc_net *tn = tipc_net(net);
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_ADDR_LEGACY_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NET);
	if (!attrs)
		goto msg_full;

	if (tn->legacy_addr_format)
		if (nla_put_flag(msg->skb, TIPC_NLA_NET_ADDR_LEGACY))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_nl_net_addr_legacy_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	struct sk_buff *rep;
	int err;

	rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	msg.skb = rep;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_addr_legacy_get(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}