// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * This file contains device methods for creating, using and destroying
 * virtual HSR devices.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include "hsr_device.h"
#include "hsr_slave.h"
#include "hsr_framereg.h"
#include "hsr_main.h"
#include "hsr_forward.h"

static bool is_admin_up(struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP);
}

static bool is_slave_up(struct net_device *dev)
{
	return dev && is_admin_up(dev) && netif_oper_up(dev);
}

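/* Set dev->operstate under dev_base_lock and notify userspace only when
 * the state actually changes.
 */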
static void __hsr_set_operstate(struct net_device *dev, int transition)
{
	write_lock_bh(&dev_base_lock);
	if (dev->operstate != transition) {
		dev->operstate = transition;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	} else {
		write_unlock_bh(&dev_base_lock);
	}
}

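/* Derive the master's operstate from its admin state and the carrier of
 * its slaves: IF_OPER_DOWN when administratively down, otherwise
 * IF_OPER_UP with carrier or IF_OPER_LOWERLAYERDOWN without.
 */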
static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
	if (!is_admin_up(master->dev)) {
		__hsr_set_operstate(master->dev, IF_OPER_DOWN);
		return;
	}

	if (has_carrier)
		__hsr_set_operstate(master->dev, IF_OPER_UP);
	else
		__hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
}

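/* The master has carrier as long as at least one slave port is up.
 * Propagate the result to the master device; caller must hold rtnl.
 */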
static bool hsr_check_carrier(struct hsr_port *master)
{
	struct hsr_port *port;

	ASSERT_RTNL();

	hsr_for_each_port(master->hsr, port) {
		if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
			netif_carrier_on(master->dev);
			return true;
		}
	}

	netif_carrier_off(master->dev);

	return false;
}

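/* (Re)start the announce timer when the master transitions to IF_OPER_UP
 * and stop it when the master leaves IF_OPER_UP.
 */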
static void hsr_check_announce(struct net_device *hsr_dev,
			       unsigned char old_operstate)
{
	struct hsr_priv *hsr;

	hsr = netdev_priv(hsr_dev);

	if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
		/* Went up */
		hsr->announce_count = 0;
		mod_timer(&hsr->announce_timer,
			  jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
	}

	if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
		/* Went down */
		del_timer(&hsr->announce_timer);
}

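/* Re-evaluate carrier and operstate of the master device, e.g. after a
 * slave port changed state, and update the announce timer accordingly.
 */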
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
	struct hsr_port *master;
	unsigned char old_operstate;
	bool has_carrier;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	/* netif_stacked_transfer_operstate() cannot be used here since
	 * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
	 */
	old_operstate = master->dev->operstate;
	has_carrier = hsr_check_carrier(master);
	hsr_set_operstate(master, has_carrier);
	hsr_check_announce(master->dev, old_operstate);
}

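/* The usable MTU is the smallest slave MTU minus the room needed for the
 * HSR tag; return 0 if not even the tag fits.
 */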
int hsr_get_max_mtu(struct hsr_priv *hsr)
{
	unsigned int mtu_max;
	struct hsr_port *port;

	mtu_max = ETH_DATA_LEN;
	hsr_for_each_port(hsr, port)
		if (port->type != HSR_PT_MASTER)
			mtu_max = min(port->dev->mtu, mtu_max);

	if (mtu_max < HSR_HLEN)
		return 0;
	return mtu_max - HSR_HLEN;
}

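/* ndo_change_mtu: reject MTUs that the slaves cannot carry once the HSR
 * tag has been added.
 */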
static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);

	if (new_mtu > hsr_get_max_mtu(hsr)) {
		netdev_info(dev, "An HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
			    HSR_HLEN);
		return -EINVAL;
	}

	dev->mtu = new_mtu;

	return 0;
}

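/* ndo_open: warn about slave ports that are not up, since full HSR
 * redundancy requires both slaves to be operational.
 */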
static int hsr_dev_open(struct net_device *dev)
{
	struct hsr_priv *hsr;
	struct hsr_port *port;
	char designation;

	hsr = netdev_priv(dev);
	designation = '\0';

	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
			designation = 'A';
			break;
		case HSR_PT_SLAVE_B:
			designation = 'B';
			break;
		default:
			designation = '?';
		}
		if (!is_slave_up(port->dev))
			netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
				    designation, port->dev->name);
	}

	if (designation == '\0')
		netdev_warn(dev, "No slave devices configured\n");

	return 0;
}

static int hsr_dev_close(struct net_device *dev)
{
	/* Nothing to do here. */
	return 0;
}

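/* Recompute the master's feature set from the features offered by the
 * slave devices.
 */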
static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
						netdev_features_t features)
{
	netdev_features_t mask;
	struct hsr_port *port;

	mask = features;

	/* Mask out all features that, if supported by one device, should be
	 * enabled for all devices (see NETIF_F_ONE_FOR_ALL).
	 *
	 * Anything that's off in mask will not be enabled - so only things
	 * that were in features originally, and are also in
	 * NETIF_F_ONE_FOR_ALL, may become enabled.
	 */
	features &= ~NETIF_F_ONE_FOR_ALL;
	hsr_for_each_port(hsr, port)
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);

	return features;
}

static netdev_features_t hsr_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	return hsr_features_recompute(hsr, features);
}

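/* ndo_start_xmit: hand the frame to the HSR forwarding code via the
 * master port, or drop it if the master port is gone.
 */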
static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	struct hsr_port *master;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	if (master) {
		skb->dev = master->dev;
		hsr_forward_skb(skb, master);
	} else {
		atomic_long_inc(&dev->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static const struct header_ops hsr_header_ops = {
	.create = eth_header,
	.parse = eth_header_parse,
};

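/* Build and send an HSR supervision frame (ANNOUNCE or LIFE_CHECK) on
 * behalf of the master port. HSRv1 prepends an HSR tag before the
 * supervision tag; HSRv0 does not.
 */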
static void send_hsr_supervision_frame(struct hsr_port *master,
				       u8 type, u8 hsr_ver)
{
	struct sk_buff *skb;
	int hlen, tlen;
	struct hsr_tag *hsr_tag;
	struct hsr_sup_tag *hsr_stag;
	struct hsr_sup_payload *hsr_sp;
	unsigned long irqflags;

	hlen = LL_RESERVED_SPACE(master->dev);
	tlen = master->dev->needed_tailroom;
	skb = dev_alloc_skb(sizeof(struct hsr_tag) +
			    sizeof(struct hsr_sup_tag) +
			    sizeof(struct hsr_sup_payload) + hlen + tlen);

	if (!skb)
		return;

	skb_reserve(skb, hlen);

	skb->dev = master->dev;
	skb->protocol = htons(hsr_ver ? ETH_P_HSR : ETH_P_PRP);
	skb->priority = TC_PRIO_CONTROL;

	if (dev_hard_header(skb, skb->dev, (hsr_ver ? ETH_P_HSR : ETH_P_PRP),
			    master->hsr->sup_multicast_addr,
			    skb->dev->dev_addr, skb->len) <= 0)
		goto out;
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (hsr_ver > 0) {
		hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
		hsr_tag->encap_proto = htons(ETH_P_PRP);
		set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE);
	}

	hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsr_ver ? 0x0 : 0xf));
	set_hsr_stag_HSR_ver(hsr_stag, hsr_ver);

	/* From HSRv1 on we have separate supervision sequence numbers. */
	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
	if (hsr_ver > 0) {
		hsr_stag->sequence_nr = htons(master->hsr->sup_sequence_nr);
		hsr_tag->sequence_nr = htons(master->hsr->sequence_nr);
		master->hsr->sup_sequence_nr++;
		master->hsr->sequence_nr++;
	} else {
		hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
		master->hsr->sequence_nr++;
	}
	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);

	hsr_stag->HSR_TLV_type = type;
	/* TODO: Why 12 in HSRv0? */
	hsr_stag->HSR_TLV_length =
		hsr_ver ? sizeof(struct hsr_sup_payload) : 12;

	/* Payload: MacAddressA */
	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);

	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return;

	hsr_forward_skb(skb, master);
	return;

out:
	WARN_ONCE(1, "HSR: Could not send supervision frame\n");
	kfree_skb(skb);
}

/* Announce (supervision frame) timer function
 */
static void hsr_announce(struct timer_list *t)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;
	unsigned long interval;

	hsr = from_timer(hsr, t, announce_timer);

	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);

	if (hsr->announce_count < 3 && hsr->prot_version == 0) {
		send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE,
					   hsr->prot_version);
		hsr->announce_count++;

		interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
	} else {
		send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
					   hsr->prot_version);

		interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
	}

	if (is_admin_up(master->dev))
		mod_timer(&hsr->announce_timer, jiffies + interval);

	rcu_read_unlock();
}

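/* Remove the slave A, slave B and master ports, if present. */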
static void hsr_del_ports(struct hsr_priv *hsr)
{
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	if (port)
		hsr_del_port(port);
}

/* This has to be called after all the readers are gone.
 * Otherwise we would have to check the return value of
 * hsr_port_get_hsr().
 */
static void hsr_dev_destroy(struct net_device *hsr_dev)
{
	struct hsr_priv *hsr = netdev_priv(hsr_dev);

	hsr_debugfs_term(hsr);
	hsr_del_ports(hsr);

	del_timer_sync(&hsr->prune_timer);
	del_timer_sync(&hsr->announce_timer);

	hsr_del_self_node(hsr);
	hsr_del_nodes(&hsr->node_db);
}

static const struct net_device_ops hsr_device_ops = {
	.ndo_change_mtu = hsr_dev_change_mtu,
	.ndo_open = hsr_dev_open,
	.ndo_stop = hsr_dev_close,
	.ndo_start_xmit = hsr_dev_xmit,
	.ndo_fix_features = hsr_fix_features,
	.ndo_uninit = hsr_dev_destroy,
};

static struct device_type hsr_type = {
	.name = "hsr",
};

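/* Set up the master net_device: random MAC address, Ethernet defaults,
 * HSR netdev and header ops, and the feature flags the device offers.
 */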
void hsr_dev_setup(struct net_device *dev)
{
	eth_hw_addr_random(dev);

	ether_setup(dev);
	dev->min_mtu = 0;
	dev->header_ops = &hsr_header_ops;
	dev->netdev_ops = &hsr_device_ops;
	SET_NETDEV_DEVTYPE(dev, &hsr_type);
	dev->priv_flags |= IFF_NO_QUEUE;

	dev->needs_free_netdev = true;

	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_CTAG_TX;

	dev->features = dev->hw_features;

	/* Prevent recursive tx locking */
	dev->features |= NETIF_F_LLTX;
	/* VLAN on top of HSR needs testing and probably some work on
	 * hsr_header_create() etc.
	 */
	dev->features |= NETIF_F_VLAN_CHALLENGED;
	/* Not sure about this. Taken from bridge code. netdev_features.h says
	 * it means "Does not change network namespaces".
	 */
	dev->features |= NETIF_F_NETNS_LOCAL;
}

/* Return true if dev is an HSR master; return false otherwise.
 */
inline bool is_hsr_master(struct net_device *dev)
{
	return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
}

/* Default multicast address for HSR Supervision frames */
static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
	0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};

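/* Initialize the hsr_priv state, register the master device and attach
 * both slave ports; on failure, undo everything set up so far.
 */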
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
		     unsigned char multicast_spec, u8 protocol_version,
		     struct netlink_ext_ack *extack)
{
	struct hsr_priv *hsr;
	int res;

	hsr = netdev_priv(hsr_dev);
	INIT_LIST_HEAD(&hsr->ports);
	INIT_LIST_HEAD(&hsr->node_db);
	INIT_LIST_HEAD(&hsr->self_node_db);
	spin_lock_init(&hsr->list_lock);

	ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);

	/* Make sure we recognize frames from ourselves in hsr_rcv() */
	res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
				   slave[1]->dev_addr);
	if (res < 0)
		return res;

	spin_lock_init(&hsr->seqnr_lock);
	/* Overflow soon to find bugs easier: */
	hsr->sequence_nr = HSR_SEQNR_START;
	hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;

	timer_setup(&hsr->announce_timer, hsr_announce, 0);
	timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);

	ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
	hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;

	hsr->prot_version = protocol_version;

	/* FIXME: should I modify the value of these?
	 *
	 * - hsr_dev->flags - i.e.
	 *			IFF_MASTER/SLAVE?
	 * - hsr_dev->priv_flags - i.e.
	 *			IFF_EBRIDGE?
	 *			IFF_TX_SKB_SHARING?
	 *			IFF_HSR_MASTER/SLAVE?
	 */

	/* Make sure the 1st call to netif_carrier_on() gets through */
	netif_carrier_off(hsr_dev);

	res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER, extack);
	if (res)
		goto err_add_master;

	res = register_netdevice(hsr_dev);
	if (res)
		goto err_unregister;

	res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
	if (res)
		goto err_add_slaves;

	res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
	if (res)
		goto err_add_slaves;

	hsr_debugfs_init(hsr, hsr_dev);
	mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));

	return 0;

err_add_slaves:
	unregister_netdevice(hsr_dev);
err_unregister:
	hsr_del_ports(hsr);
err_add_master:
	hsr_del_self_node(hsr);

	return res;
}