blob: 7ed308a0c0352d9c8c1fac62d5d9e8d1731a92c5 [file] [log] [blame]
Murali Karicheri0e7623b2019-04-05 13:31:34 -04001// SPDX-License-Identifier: GPL-2.0
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02002/* Copyright 2011-2014 Autronica Fire and Security AS
Arvid Brodinf4214362013-10-30 21:10:47 +01003 *
Arvid Brodinf4214362013-10-30 21:10:47 +01004 * Author(s):
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02005 * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
Arvid Brodinf4214362013-10-30 21:10:47 +01006 *
7 * Routines for handling Netlink messages for HSR.
8 */
9
10#include "hsr_netlink.h"
11#include <linux/kernel.h>
12#include <net/rtnetlink.h>
13#include <net/genetlink.h>
14#include "hsr_main.h"
15#include "hsr_device.h"
16#include "hsr_framereg.h"
17
/* Netlink attribute policy for the IFLA_HSR_* attributes accepted when an
 * HSR interface is created or dumped via rtnetlink.
 */
static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },	/* slave 1 ifindex */
	[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },	/* slave 2 ifindex */
	[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },	/* last byte of supervision addr */
	[IFLA_HSR_VERSION] = { .type = NLA_U8 },	/* HSR protocol version */
	[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
};
26
Arvid Brodinf4214362013-10-30 21:10:47 +010027/* Here, it seems a netdevice has already been allocated for us, and the
28 * hsr_dev_setup routine has been executed. Nice!
29 */
30static int hsr_newlink(struct net *src_net, struct net_device *dev,
Matthias Schiffer7a3f4a12017-06-25 23:55:59 +020031 struct nlattr *tb[], struct nlattr *data[],
32 struct netlink_ext_ack *extack)
Arvid Brodinf4214362013-10-30 21:10:47 +010033{
34 struct net_device *link[2];
Peter Heiseee1c2792016-04-13 13:52:22 +020035 unsigned char multicast_spec, hsr_version;
Arvid Brodinf4214362013-10-30 21:10:47 +010036
Arvid Brodina718dcc2014-07-04 23:42:00 +020037 if (!data) {
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000038 NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
Arvid Brodina718dcc2014-07-04 23:42:00 +020039 return -EINVAL;
40 }
Arvid Brodinf4214362013-10-30 21:10:47 +010041 if (!data[IFLA_HSR_SLAVE1]) {
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000042 NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
Arvid Brodinf4214362013-10-30 21:10:47 +010043 return -EINVAL;
44 }
Murali Karicherid595b852019-04-05 13:31:23 -040045 link[0] = __dev_get_by_index(src_net,
46 nla_get_u32(data[IFLA_HSR_SLAVE1]));
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000047 if (!link[0]) {
48 NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
49 return -EINVAL;
50 }
Arvid Brodinf4214362013-10-30 21:10:47 +010051 if (!data[IFLA_HSR_SLAVE2]) {
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000052 NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
Arvid Brodinf4214362013-10-30 21:10:47 +010053 return -EINVAL;
54 }
Murali Karicherid595b852019-04-05 13:31:23 -040055 link[1] = __dev_get_by_index(src_net,
56 nla_get_u32(data[IFLA_HSR_SLAVE2]));
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000057 if (!link[1]) {
58 NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
Arvid Brodinf4214362013-10-30 21:10:47 +010059 return -EINVAL;
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000060 }
61
62 if (link[0] == link[1]) {
63 NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
64 return -EINVAL;
65 }
Arvid Brodinf4214362013-10-30 21:10:47 +010066
67 if (!data[IFLA_HSR_MULTICAST_SPEC])
68 multicast_spec = 0;
69 else
70 multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
71
Peter Heiseee1c2792016-04-13 13:52:22 +020072 if (!data[IFLA_HSR_VERSION])
73 hsr_version = 0;
74 else
75 hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
76
Taehee Yoo13eeb5f2020-02-28 18:01:35 +000077 return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
Arvid Brodinf4214362013-10-30 21:10:47 +010078}
79
Arvid Brodin98bf8362013-11-29 23:38:16 +010080static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
81{
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020082 struct hsr_priv *hsr;
Arvid Brodinc5a75912014-07-04 23:38:05 +020083 struct hsr_port *port;
Arvid Brodin51f3c602014-07-04 23:37:27 +020084 int res;
Arvid Brodin98bf8362013-11-29 23:38:16 +010085
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020086 hsr = netdev_priv(dev);
Arvid Brodin98bf8362013-11-29 23:38:16 +010087
Arvid Brodin51f3c602014-07-04 23:37:27 +020088 res = 0;
Arvid Brodin98bf8362013-11-29 23:38:16 +010089
Arvid Brodin51f3c602014-07-04 23:37:27 +020090 rcu_read_lock();
Arvid Brodinc5a75912014-07-04 23:38:05 +020091 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
92 if (port)
93 res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
Arvid Brodin51f3c602014-07-04 23:37:27 +020094 rcu_read_unlock();
95 if (res)
96 goto nla_put_failure;
97
98 rcu_read_lock();
Arvid Brodinc5a75912014-07-04 23:38:05 +020099 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
100 if (port)
101 res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
Arvid Brodin51f3c602014-07-04 23:37:27 +0200102 rcu_read_unlock();
103 if (res)
104 goto nla_put_failure;
Arvid Brodin98bf8362013-11-29 23:38:16 +0100105
106 if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200107 hsr->sup_multicast_addr) ||
108 nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
Arvid Brodin98bf8362013-11-29 23:38:16 +0100109 goto nla_put_failure;
110
111 return 0;
112
113nla_put_failure:
114 return -EMSGSIZE;
115}
116
/* rtnetlink link operations: implements "ip link add ... type hsr ...". */
static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind = "hsr",
	.maxtype = IFLA_HSR_MAX,
	.policy = hsr_policy,
	.priv_size = sizeof(struct hsr_priv),
	.setup = hsr_dev_setup,
	.newlink = hsr_newlink,
	.fill_info = hsr_fill_info,
};
126
/* attribute policy */
/* Generic netlink attribute policy for the HSR_A_* attributes used by the
 * node status/list commands and notifications.
 */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },	/* node MacAddressA */
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },	/* node MacAddressB */
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};
137
/* Forward declaration: the notification helpers below reference the family
 * before its full definition near the end of this file.
 */
static struct genl_family hsr_genl_family;

/* Multicast group carrying HSR_C_RING_ERROR / HSR_C_NODE_DOWN events. */
static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
143
Arvid Brodinf4214362013-10-30 21:10:47 +0100144/* This is called if for some node with MAC address addr, we only get frames
145 * over one of the slave interfaces. This would indicate an open network ring
146 * (i.e. a link has failed somewhere).
147 */
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200148void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
Arvid Brodinc5a75912014-07-04 23:38:05 +0200149 struct hsr_port *port)
Arvid Brodinf4214362013-10-30 21:10:47 +0100150{
151 struct sk_buff *skb;
152 void *msg_head;
Arvid Brodinc5a75912014-07-04 23:38:05 +0200153 struct hsr_port *master;
Arvid Brodinf4214362013-10-30 21:10:47 +0100154 int res;
Arvid Brodinf4214362013-10-30 21:10:47 +0100155
156 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
157 if (!skb)
158 goto fail;
159
Murali Karicherid595b852019-04-05 13:31:23 -0400160 msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
161 HSR_C_RING_ERROR);
Arvid Brodinf4214362013-10-30 21:10:47 +0100162 if (!msg_head)
163 goto nla_put_failure;
164
165 res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
166 if (res < 0)
167 goto nla_put_failure;
168
Arvid Brodinc5a75912014-07-04 23:38:05 +0200169 res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
Arvid Brodinf4214362013-10-30 21:10:47 +0100170 if (res < 0)
171 goto nla_put_failure;
172
173 genlmsg_end(skb, msg_head);
Johannes Berg2a94fe42013-11-19 15:19:39 +0100174 genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
Arvid Brodinf4214362013-10-30 21:10:47 +0100175
176 return;
177
178nla_put_failure:
179 kfree_skb(skb);
180
181fail:
Arvid Brodinc5a75912014-07-04 23:38:05 +0200182 rcu_read_lock();
183 master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
184 netdev_warn(master->dev, "Could not send HSR ring error message\n");
185 rcu_read_unlock();
Arvid Brodinf4214362013-10-30 21:10:47 +0100186}
187
188/* This is called when we haven't heard from the node with MAC address addr for
189 * some time (just before the node is removed from the node table/list).
190 */
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200191void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
Arvid Brodinf4214362013-10-30 21:10:47 +0100192{
193 struct sk_buff *skb;
194 void *msg_head;
Arvid Brodinc5a75912014-07-04 23:38:05 +0200195 struct hsr_port *master;
Arvid Brodinf4214362013-10-30 21:10:47 +0100196 int res;
197
198 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
199 if (!skb)
200 goto fail;
201
202 msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
203 if (!msg_head)
204 goto nla_put_failure;
205
Arvid Brodinf4214362013-10-30 21:10:47 +0100206 res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
207 if (res < 0)
208 goto nla_put_failure;
209
210 genlmsg_end(skb, msg_head);
Johannes Berg2a94fe42013-11-19 15:19:39 +0100211 genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
Arvid Brodinf4214362013-10-30 21:10:47 +0100212
213 return;
214
215nla_put_failure:
216 kfree_skb(skb);
217
218fail:
Arvid Brodinc5a75912014-07-04 23:38:05 +0200219 rcu_read_lock();
220 master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
221 netdev_warn(master->dev, "Could not send HSR node down\n");
222 rcu_read_unlock();
Arvid Brodinf4214362013-10-30 21:10:47 +0100223}
224
/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 *
 * Malformed requests are answered with netlink_ack(-EINVAL) and the
 * handler itself returns 0; only internal failures (allocation, skb room)
 * return a negative errno.
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	/* Both the target device and the node address are mandatory. */
	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	/* NOTE(review): this genl doit does not hold RTNL, so
	 * __dev_get_by_index() looks unprotected here; later kernels moved
	 * this lookup under rcu_read_lock() with dev_get_by_index_rcu() —
	 * verify the locking assumptions for this tree.
	 */
	hsr_dev = __dev_get_by_index(genl_info_net(info),
				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto invalid;
	if (!is_hsr_master(hsr_dev))
		goto invalid;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	/* Look the node up in the node table and fetch its per-slave
	 * frame age and sequence-number state.
	 */
	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	/* Echo the requested node address back to the caller. */
	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	/* MacAddressB info is optional; only present when known. */
	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	/* Slave A may be absent; then no ifindex attribute is emitted. */
	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	rcu_read_unlock();
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	/* Same for slave B. */
	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	rcu_read_unlock();
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	return res;
}
364
/* Get a list of MacAddressA of all nodes known to this node (including self).
 *
 * Handler for HSR_C_GET_NODE_LIST: replies with one HSR_A_NODE_ADDR
 * attribute per known node. Malformed requests get netlink_ack(-EINVAL)
 * and a 0 return; only internal failures return a negative errno.
 *
 * NOTE(review): the reply is a single NLMSG_GOODSIZE skb; with enough
 * nodes the nla_put() presumably fails with -EMSGSIZE rather than
 * returning a partial list — later kernels added restart logic here;
 * confirm whether that matters for this tree.
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	void *pos;		/* opaque iterator for hsr_get_next_node() */
	unsigned char addr[ETH_ALEN];
	int res;

	if (!info)
		goto invalid;

	/* The target HSR device ifindex is mandatory. */
	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	hsr_dev = __dev_get_by_index(genl_info_net(info),
				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto invalid;
	if (!is_hsr_master(hsr_dev))
		goto invalid;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);

	/* Walk the node table under RCU, emitting one address per node. */
	rcu_read_lock();
	pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	return res;
}
444
/* Generic netlink command table: userspace queries of the node table. */
static const struct genl_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};
461
Johannes Berg56989f62016-10-24 14:40:05 +0200462static struct genl_family hsr_genl_family __ro_after_init = {
Johannes Berg489111e2016-10-24 14:40:03 +0200463 .hdrsize = 0,
464 .name = "HSR",
465 .version = 1,
466 .maxattr = HSR_A_MAX,
Johannes Berg3b0f31f2019-03-21 22:51:02 +0100467 .policy = hsr_genl_policy,
Johannes Berg489111e2016-10-24 14:40:03 +0200468 .module = THIS_MODULE,
469 .ops = hsr_ops,
470 .n_ops = ARRAY_SIZE(hsr_ops),
471 .mcgrps = hsr_mcgrps,
472 .n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
473};
474
Arvid Brodinf4214362013-10-30 21:10:47 +0100475int __init hsr_netlink_init(void)
476{
477 int rc;
478
479 rc = rtnl_link_register(&hsr_link_ops);
480 if (rc)
481 goto fail_rtnl_link_register;
482
Johannes Berg489111e2016-10-24 14:40:03 +0200483 rc = genl_register_family(&hsr_genl_family);
Arvid Brodinf4214362013-10-30 21:10:47 +0100484 if (rc)
485 goto fail_genl_register_family;
486
Taehee Yooc6c4ccd2019-12-22 11:26:27 +0000487 hsr_debugfs_create_root();
Arvid Brodinf4214362013-10-30 21:10:47 +0100488 return 0;
489
Arvid Brodinf4214362013-10-30 21:10:47 +0100490fail_genl_register_family:
491 rtnl_link_unregister(&hsr_link_ops);
492fail_rtnl_link_register:
493
494 return rc;
495}
496
/* Module teardown: unregister the generic netlink family and the
 * rtnetlink link type registered in hsr_netlink_init().
 */
void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}
502
503MODULE_ALIAS_RTNL_LINK("hsr");