blob: f3c8f91dbe2c1687b7960ed20b20fffc48b48e62 [file] [log] [blame]
Murali Karicheri0e7623b2019-04-05 13:31:34 -04001// SPDX-License-Identifier: GPL-2.0
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02002/* Copyright 2011-2014 Autronica Fire and Security AS
Arvid Brodinf4214362013-10-30 21:10:47 +01003 *
Arvid Brodinf4214362013-10-30 21:10:47 +01004 * Author(s):
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02005 * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
Arvid Brodinf4214362013-10-30 21:10:47 +01006 *
Murali Karicheri8f4c0e02020-07-22 10:40:16 -04007 * Routines for handling Netlink messages for HSR and PRP.
Arvid Brodinf4214362013-10-30 21:10:47 +01008 */
9
10#include "hsr_netlink.h"
11#include <linux/kernel.h>
12#include <net/rtnetlink.h>
13#include <net/genetlink.h>
14#include "hsr_main.h"
15#include "hsr_device.h"
16#include "hsr_framereg.h"
17
/* Netlink attribute policy for IFLA_HSR_* attributes, consumed by
 * hsr_newlink() and emitted by hsr_fill_info().
 */
static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },	/* ifindex of slave 1 */
	[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },	/* ifindex of slave 2 */
	[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },	/* last byte of supervision addr */
	[IFLA_HSR_VERSION] = { .type = NLA_U8 },	/* HSR protocol version (0/1) */
	[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
	[IFLA_HSR_PROTOCOL] = { .type = NLA_U8 },	/* HSR_PROTOCOL_HSR or _PRP */
};
27
Arvid Brodinf4214362013-10-30 21:10:47 +010028/* Here, it seems a netdevice has already been allocated for us, and the
29 * hsr_dev_setup routine has been executed. Nice!
30 */
31static int hsr_newlink(struct net *src_net, struct net_device *dev,
Matthias Schiffer7a3f4a12017-06-25 23:55:59 +020032 struct nlattr *tb[], struct nlattr *data[],
33 struct netlink_ext_ack *extack)
Arvid Brodinf4214362013-10-30 21:10:47 +010034{
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040035 enum hsr_version proto_version;
36 unsigned char multicast_spec;
37 u8 proto = HSR_PROTOCOL_HSR;
Arvid Brodinf4214362013-10-30 21:10:47 +010038 struct net_device *link[2];
Arvid Brodinf4214362013-10-30 21:10:47 +010039
Arvid Brodina718dcc2014-07-04 23:42:00 +020040 if (!data) {
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000041 NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
Arvid Brodina718dcc2014-07-04 23:42:00 +020042 return -EINVAL;
43 }
Arvid Brodinf4214362013-10-30 21:10:47 +010044 if (!data[IFLA_HSR_SLAVE1]) {
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000045 NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
Arvid Brodinf4214362013-10-30 21:10:47 +010046 return -EINVAL;
47 }
Murali Karicherid595b852019-04-05 13:31:23 -040048 link[0] = __dev_get_by_index(src_net,
49 nla_get_u32(data[IFLA_HSR_SLAVE1]));
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000050 if (!link[0]) {
51 NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
52 return -EINVAL;
53 }
Arvid Brodinf4214362013-10-30 21:10:47 +010054 if (!data[IFLA_HSR_SLAVE2]) {
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000055 NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
Arvid Brodinf4214362013-10-30 21:10:47 +010056 return -EINVAL;
57 }
Murali Karicherid595b852019-04-05 13:31:23 -040058 link[1] = __dev_get_by_index(src_net,
59 nla_get_u32(data[IFLA_HSR_SLAVE2]));
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000060 if (!link[1]) {
61 NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
Arvid Brodinf4214362013-10-30 21:10:47 +010062 return -EINVAL;
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000063 }
64
65 if (link[0] == link[1]) {
66 NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
67 return -EINVAL;
68 }
Arvid Brodinf4214362013-10-30 21:10:47 +010069
70 if (!data[IFLA_HSR_MULTICAST_SPEC])
71 multicast_spec = 0;
72 else
73 multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
74
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040075 if (data[IFLA_HSR_PROTOCOL])
76 proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);
77
78 if (proto >= HSR_PROTOCOL_MAX) {
Ye Binb87f9fe2020-09-09 17:38:21 +080079 NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040080 return -EINVAL;
81 }
82
Taehee Yoo4faab8c2020-04-07 13:23:21 +000083 if (!data[IFLA_HSR_VERSION]) {
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040084 proto_version = HSR_V0;
Taehee Yoo4faab8c2020-04-07 13:23:21 +000085 } else {
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040086 if (proto == HSR_PROTOCOL_PRP) {
Ye Binb87f9fe2020-09-09 17:38:21 +080087 NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040088 return -EINVAL;
89 }
90
91 proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
92 if (proto_version > HSR_V1) {
Taehee Yoo4faab8c2020-04-07 13:23:21 +000093 NL_SET_ERR_MSG_MOD(extack,
Ye Binb87f9fe2020-09-09 17:38:21 +080094 "Only HSR version 0/1 supported");
Taehee Yoo4faab8c2020-04-07 13:23:21 +000095 return -EINVAL;
96 }
97 }
Peter Heiseee1c2792016-04-13 13:52:22 +020098
Murali Karicheri8f4c0e02020-07-22 10:40:16 -040099 if (proto == HSR_PROTOCOL_PRP)
100 proto_version = PRP_V1;
101
102 return hsr_dev_finalize(dev, link, multicast_spec, proto_version, extack);
Arvid Brodinf4214362013-10-30 21:10:47 +0100103}
104
/* rtnl .dellink handler: tear down an HSR/PRP master device.
 * Called under RTNL; dev is queued on head for unregistration.
 */
static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	/* Stop periodic work before freeing the state it operates on. */
	del_timer_sync(&hsr->prune_timer);
	del_timer_sync(&hsr->announce_timer);

	hsr_debugfs_term(hsr);
	hsr_del_ports(hsr);

	/* Drop the node table (self entry plus all learned nodes). */
	hsr_del_self_node(hsr);
	hsr_del_nodes(&hsr->node_db);

	unregister_netdevice_queue(dev, head);
}
120
Arvid Brodin98bf8362013-11-29 23:38:16 +0100121static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
122{
Taehee Yoo81390d02020-02-28 18:01:56 +0000123 struct hsr_priv *hsr = netdev_priv(dev);
Murali Karicheri8f4c0e02020-07-22 10:40:16 -0400124 u8 proto = HSR_PROTOCOL_HSR;
Arvid Brodinc5a75912014-07-04 23:38:05 +0200125 struct hsr_port *port;
Arvid Brodin98bf8362013-11-29 23:38:16 +0100126
Arvid Brodinc5a75912014-07-04 23:38:05 +0200127 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
Taehee Yoo81390d02020-02-28 18:01:56 +0000128 if (port) {
129 if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
130 goto nla_put_failure;
131 }
Arvid Brodin51f3c602014-07-04 23:37:27 +0200132
Arvid Brodinc5a75912014-07-04 23:38:05 +0200133 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
Taehee Yoo81390d02020-02-28 18:01:56 +0000134 if (port) {
135 if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
136 goto nla_put_failure;
137 }
Arvid Brodin98bf8362013-11-29 23:38:16 +0100138
139 if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200140 hsr->sup_multicast_addr) ||
141 nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
Arvid Brodin98bf8362013-11-29 23:38:16 +0100142 goto nla_put_failure;
Murali Karicheri8f4c0e02020-07-22 10:40:16 -0400143 if (hsr->prot_version == PRP_V1)
144 proto = HSR_PROTOCOL_PRP;
145 if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
146 goto nla_put_failure;
Arvid Brodin98bf8362013-11-29 23:38:16 +0100147
148 return 0;
149
150nla_put_failure:
151 return -EMSGSIZE;
152}
153
/* rtnl_link_ops for the "hsr" link kind; PRP devices are created through
 * the same kind with IFLA_HSR_PROTOCOL set to HSR_PROTOCOL_PRP.
 */
static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind = "hsr",
	.maxtype = IFLA_HSR_MAX,
	.policy = hsr_policy,
	.priv_size = sizeof(struct hsr_priv),
	.setup = hsr_dev_setup,
	.newlink = hsr_newlink,
	.dellink = hsr_dellink,
	.fill_info = hsr_fill_info,
};
164
/* attribute policy */
/* Generic-netlink attribute policy for the HSR_C_* commands below. */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },	/* node MacAddressA */
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },	/* node MacAddressB */
	[HSR_A_IFINDEX] = { .type = NLA_U32 },		/* hsr master ifindex */
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};
175
/* Forward declaration: the family is defined after the handlers that
 * reference it.
 */
static struct genl_family hsr_genl_family;

/* Multicast group for unsolicited ring-error / node-down notifications. */
static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
181
Arvid Brodinf4214362013-10-30 21:10:47 +0100182/* This is called if for some node with MAC address addr, we only get frames
183 * over one of the slave interfaces. This would indicate an open network ring
184 * (i.e. a link has failed somewhere).
185 */
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200186void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
Arvid Brodinc5a75912014-07-04 23:38:05 +0200187 struct hsr_port *port)
Arvid Brodinf4214362013-10-30 21:10:47 +0100188{
189 struct sk_buff *skb;
190 void *msg_head;
Arvid Brodinc5a75912014-07-04 23:38:05 +0200191 struct hsr_port *master;
Arvid Brodinf4214362013-10-30 21:10:47 +0100192 int res;
Arvid Brodinf4214362013-10-30 21:10:47 +0100193
194 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
195 if (!skb)
196 goto fail;
197
Murali Karicherid595b852019-04-05 13:31:23 -0400198 msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
199 HSR_C_RING_ERROR);
Arvid Brodinf4214362013-10-30 21:10:47 +0100200 if (!msg_head)
201 goto nla_put_failure;
202
203 res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
204 if (res < 0)
205 goto nla_put_failure;
206
Arvid Brodinc5a75912014-07-04 23:38:05 +0200207 res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
Arvid Brodinf4214362013-10-30 21:10:47 +0100208 if (res < 0)
209 goto nla_put_failure;
210
211 genlmsg_end(skb, msg_head);
Johannes Berg2a94fe42013-11-19 15:19:39 +0100212 genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
Arvid Brodinf4214362013-10-30 21:10:47 +0100213
214 return;
215
216nla_put_failure:
217 kfree_skb(skb);
218
219fail:
Arvid Brodinc5a75912014-07-04 23:38:05 +0200220 rcu_read_lock();
221 master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
222 netdev_warn(master->dev, "Could not send HSR ring error message\n");
223 rcu_read_unlock();
Arvid Brodinf4214362013-10-30 21:10:47 +0100224}
225
226/* This is called when we haven't heard from the node with MAC address addr for
227 * some time (just before the node is removed from the node table/list).
228 */
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200229void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
Arvid Brodinf4214362013-10-30 21:10:47 +0100230{
231 struct sk_buff *skb;
232 void *msg_head;
Arvid Brodinc5a75912014-07-04 23:38:05 +0200233 struct hsr_port *master;
Arvid Brodinf4214362013-10-30 21:10:47 +0100234 int res;
235
236 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
237 if (!skb)
238 goto fail;
239
240 msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
241 if (!msg_head)
242 goto nla_put_failure;
243
Arvid Brodinf4214362013-10-30 21:10:47 +0100244 res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
245 if (res < 0)
246 goto nla_put_failure;
247
248 genlmsg_end(skb, msg_head);
Johannes Berg2a94fe42013-11-19 15:19:39 +0100249 genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
Arvid Brodinf4214362013-10-30 21:10:47 +0100250
251 return;
252
253nla_put_failure:
254 kfree_skb(skb);
255
256fail:
Arvid Brodinc5a75912014-07-04 23:38:05 +0200257 rcu_read_lock();
258 master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
259 netdev_warn(master->dev, "Could not send HSR node down\n");
260 rcu_read_unlock();
Arvid Brodinf4214362013-10-30 21:10:47 +0100261}
262
/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 *
 * Replies with HSR_C_SET_NODE_STATUS via unicast; malformed requests are
 * answered with a netlink ack carrying -EINVAL.
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	/* Both the master ifindex and the node address are mandatory. */
	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	/* RCU protects hsr_dev and the slave ports looked up below; held
	 * across the whole reply construction.
	 */
	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	/* Look up the node's per-slave statistics in the node table. */
	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	/* MacAddressB info is only present if the node reported one. */
	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
404
/* Get a list of MacAddressA of all nodes known to this node (including self).
 *
 * Replies with one or more HSR_C_SET_NODE_LIST unicast messages: when a
 * reply skb fills up (-EMSGSIZE), the partial message is sent and the walk
 * restarts with a fresh skb, resuming from the current node.
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;	/* opaque iterator into the node table */
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	/* RCU protects hsr_dev and the node-table walk below. */
	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	/* Only the first message of the sequence carries the ifindex. */
	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			/* skb full: flush what we have and continue the walk
			 * in a new message (pos is kept).
			 */
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
495
/* Generic-netlink commands supported by the "HSR" family. */
static const struct genl_small_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};
512
/* Generic-netlink family definition (declared above the handlers). */
static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.netnsok = true,	/* commands resolve the ifindex in the caller's netns */
	.module = THIS_MODULE,
	.small_ops = hsr_ops,
	.n_small_ops = ARRAY_SIZE(hsr_ops),
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};
526
Arvid Brodinf4214362013-10-30 21:10:47 +0100527int __init hsr_netlink_init(void)
528{
529 int rc;
530
531 rc = rtnl_link_register(&hsr_link_ops);
532 if (rc)
533 goto fail_rtnl_link_register;
534
Johannes Berg489111e2016-10-24 14:40:03 +0200535 rc = genl_register_family(&hsr_genl_family);
Arvid Brodinf4214362013-10-30 21:10:47 +0100536 if (rc)
537 goto fail_genl_register_family;
538
Taehee Yooc6c4ccd2019-12-22 11:26:27 +0000539 hsr_debugfs_create_root();
Arvid Brodinf4214362013-10-30 21:10:47 +0100540 return 0;
541
Arvid Brodinf4214362013-10-30 21:10:47 +0100542fail_genl_register_family:
543 rtnl_link_unregister(&hsr_link_ops);
544fail_rtnl_link_register:
545
546 return rc;
547}
548
/* Module exit: unregister in reverse order of hsr_netlink_init(). */
void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}
554
555MODULE_ALIAS_RTNL_LINK("hsr");