/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Author(s):
 *      2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * This file contains device methods for creating, using and destroying
 * virtual HSR devices.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include "hsr_device.h"
#include "hsr_slave.h"
#include "hsr_framereg.h"
#include "hsr_main.h"


static bool is_admin_up(struct net_device *dev)
{
        return dev && (dev->flags & IFF_UP);
}

static bool is_slave_up(struct net_device *dev)
{
        return dev && is_admin_up(dev) && netif_oper_up(dev);
}

static void __hsr_set_operstate(struct net_device *dev, int transition)
{
        write_lock_bh(&dev_base_lock);
        if (dev->operstate != transition) {
                dev->operstate = transition;
                write_unlock_bh(&dev_base_lock);
                netdev_state_change(dev);
        } else {
                write_unlock_bh(&dev_base_lock);
        }
}

static void hsr_set_operstate(struct net_device *hsr_dev, bool has_carrier)
{
        if (!is_admin_up(hsr_dev)) {
                __hsr_set_operstate(hsr_dev, IF_OPER_DOWN);
                return;
        }

        if (has_carrier)
                __hsr_set_operstate(hsr_dev, IF_OPER_UP);
        else
                __hsr_set_operstate(hsr_dev, IF_OPER_LOWERLAYERDOWN);
}

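/* The HSR master is considered to have carrier as long as at least one of its
 * slave ports is up; losing a single ring port therefore does not take the
 * master link down.
 */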
static bool hsr_check_carrier(struct hsr_priv *hsr)
{
        bool has_carrier;

        has_carrier = (is_slave_up(hsr->slave[0]) || is_slave_up(hsr->slave[1]));

        if (has_carrier)
                netif_carrier_on(hsr->dev);
        else
                netif_carrier_off(hsr->dev);

        return has_carrier;
}


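/* Start announcing (sending supervision frames) when the master goes
 * operationally up, and stop the announce timer again when it goes down.
 */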
static void hsr_check_announce(struct net_device *hsr_dev,
                               unsigned char old_operstate)
{
        struct hsr_priv *hsr;

        hsr = netdev_priv(hsr_dev);

        if ((hsr_dev->operstate == IF_OPER_UP) && (old_operstate != IF_OPER_UP)) {
                /* Went up */
                hsr->announce_count = 0;
                hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
                add_timer(&hsr->announce_timer);
        }

        if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
                /* Went down */
                del_timer(&hsr->announce_timer);
}

void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
        unsigned char old_operstate;
        bool has_carrier;

        /* netif_stacked_transfer_operstate() cannot be used here since
         * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
         */
        old_operstate = hsr->dev->operstate;
        has_carrier = hsr_check_carrier(hsr);
        hsr_set_operstate(hsr->dev, has_carrier);
        hsr_check_announce(hsr->dev, old_operstate);
}


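/* The MTU seen by upper layers must leave room for the HSR tag that is added
 * on the slave ports (HSR_HLEN octets, defined in hsr_main.h), so the usable
 * maximum is the smallest slave MTU minus HSR_HLEN.
 */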
int hsr_get_max_mtu(struct hsr_priv *hsr)
{
        int mtu_max;

        if (hsr->slave[0] && hsr->slave[1])
                mtu_max = min(hsr->slave[0]->mtu, hsr->slave[1]->mtu);
        else if (hsr->slave[0])
                mtu_max = hsr->slave[0]->mtu;
        else if (hsr->slave[1])
                mtu_max = hsr->slave[1]->mtu;
        else
                mtu_max = HSR_HLEN;

        return mtu_max - HSR_HLEN;
}

static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
{
        struct hsr_priv *hsr;

        hsr = netdev_priv(dev);

        if (new_mtu > hsr_get_max_mtu(hsr)) {
                netdev_info(hsr->dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
                            HSR_HLEN);
                return -EINVAL;
        }

        dev->mtu = new_mtu;

        return 0;
}

static int hsr_dev_open(struct net_device *dev)
{
        struct hsr_priv *hsr;
        int i;
        char *slave_name;

        hsr = netdev_priv(dev);

        for (i = 0; i < HSR_MAX_SLAVE; i++) {
                if (hsr->slave[i])
                        slave_name = hsr->slave[i]->name;
                else
                        slave_name = "null";

                if (!is_slave_up(hsr->slave[i]))
                        netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a working HSR network\n",
                                    'A' + i, slave_name);
        }

        return 0;
}

static int hsr_dev_close(struct net_device *dev)
{
        /* Nothing to do here. We could try to restore the state of the slaves
         * to what they were before being changed by the hsr master dev's state,
         * but they might have been changed manually in the meantime too, so
         * taking them up or down here might be confusing and is probably not a
         * good idea.
         */
        return 0;
}


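/* Layout of a tagged frame as produced by hsr_fill_tag() below; struct
 * hsr_ethhdr (an Ethernet header immediately followed by struct hsr_tag) is
 * assumed to match the declarations in hsr_main.h:
 *
 *   dst MAC (6) | src MAC (6) | ETH_P_PRP (2) |
 *   path + LSDU_size (2) | sequence_nr (2) | encap_proto (2) | payload
 *
 * where encap_proto holds the original EtherType and ETH_P_PRP in the
 * Ethernet header marks the frame as HSR/PRP-tagged.
 */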
static void hsr_fill_tag(struct hsr_ethhdr *hsr_ethhdr, struct hsr_priv *hsr)
{
        unsigned long irqflags;

        /* IEC 62439-1:2010, p 48, says the 4-bit "path" field can take values
         * between 0001-1001 ("ring identifier", for regular HSR frames),
         * or 1111 ("HSR management", supervision frames). Unfortunately, the
         * spec writers forgot to explain what a "ring identifier" is, or
         * how it is used. So we just set this to 0001 for regular frames,
         * and 1111 for supervision frames.
         */
        set_hsr_tag_path(&hsr_ethhdr->hsr_tag, 0x1);

        /* IEC 62439-1:2010, p 12: "The link service data unit in an Ethernet
         * frame is the content of the frame located between the Length/Type
         * field and the Frame Check Sequence."
         *
         * IEC 62439-3, p 48, specifies the "original LPDU" to include the
         * original "LT" field (what "LT" means is not explained anywhere as
         * far as I can see - perhaps "Length/Type"?). So LSDU_size might
         * equal original length + 2.
         * Also, the fact that this field is not used anywhere (might be used
         * by a RedBox connecting HSR and PRP nets?) means I cannot test its
         * correctness. Instead of guessing, I set this to 0 here, to make any
         * problems immediately apparent. Anyone using this driver with PRP/HSR
         * RedBoxes might need to fix this...
         */
        set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, 0);

        spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
        hsr_ethhdr->hsr_tag.sequence_nr = htons(hsr->sequence_nr);
        hsr->sequence_nr++;
        spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);

        hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;

        hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP);
}

static int slave_xmit(struct sk_buff *skb, struct hsr_priv *hsr,
                      enum hsr_dev_idx dev_idx)
{
        struct hsr_ethhdr *hsr_ethhdr;

        hsr_ethhdr = (struct hsr_ethhdr *) skb->data;

        skb->dev = hsr->slave[dev_idx];

        hsr_addr_subst_dest(hsr, &hsr_ethhdr->ethhdr, dev_idx);

        /* Address substitution (IEC62439-3 pp 26, 50): replace mac
         * address of outgoing frame with that of the outgoing slave's.
         */
        ether_addr_copy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr);

        return dev_queue_xmit(skb);
}


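/* Transmit path for the master device: tag the frame (unless it already
 * carries an HSR tag), duplicate it with pskb_copy(), and hand one copy to
 * each slave. The transmission is counted as successful if either copy was
 * accepted by its slave device.
 */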
static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct hsr_priv *hsr;
        struct hsr_ethhdr *hsr_ethhdr;
        struct sk_buff *skb2;
        int res1, res2;

        hsr = netdev_priv(dev);
        hsr_ethhdr = (struct hsr_ethhdr *) skb->data;

        if ((skb->protocol != htons(ETH_P_PRP)) ||
            (hsr_ethhdr->ethhdr.h_proto != htons(ETH_P_PRP))) {
                hsr_fill_tag(hsr_ethhdr, hsr);
                skb->protocol = htons(ETH_P_PRP);
        }

        skb2 = pskb_copy(skb, GFP_ATOMIC);

        res1 = NET_XMIT_DROP;
        if (likely(hsr->slave[HSR_DEV_SLAVE_A]))
                res1 = slave_xmit(skb, hsr, HSR_DEV_SLAVE_A);

        res2 = NET_XMIT_DROP;
        if (likely(skb2 && hsr->slave[HSR_DEV_SLAVE_B]))
                res2 = slave_xmit(skb2, hsr, HSR_DEV_SLAVE_B);

        if (likely(res1 == NET_XMIT_SUCCESS || res1 == NET_XMIT_CN ||
                   res2 == NET_XMIT_SUCCESS || res2 == NET_XMIT_CN)) {
                hsr->dev->stats.tx_packets++;
                hsr->dev->stats.tx_bytes += skb->len;
        } else {
                hsr->dev->stats.tx_dropped++;
        }

        return NETDEV_TX_OK;
}


static int hsr_header_create(struct sk_buff *skb, struct net_device *dev,
                             unsigned short type, const void *daddr,
                             const void *saddr, unsigned int len)
{
        int res;

        /* Make room for the HSR tag now. We will fill it in later (in
         * hsr_dev_xmit)
         */
        if (skb_headroom(skb) < HSR_HLEN + ETH_HLEN)
                return -ENOBUFS;
        skb_push(skb, HSR_HLEN);

        /* To allow VLAN/HSR combos we should probably use
         * res = dev_hard_header(skb, dev, type, daddr, saddr, len + HSR_HLEN);
         * here instead. It would require other changes too, though - e.g.
         * separate headers for each slave etc...
         */
        res = eth_header(skb, dev, type, daddr, saddr, len + HSR_HLEN);
        if (res <= 0)
                return res;
        skb_reset_mac_header(skb);

        return res + HSR_HLEN;
}


static const struct header_ops hsr_header_ops = {
        .create  = hsr_header_create,
        .parse   = eth_header_parse,
};


/* HSR:2010 supervision frames should be padded so that the whole frame,
 * including headers and FCS, is 64 bytes (without VLAN).
 */
static int hsr_pad(int size)
{
        const int min_size = ETH_ZLEN - HSR_HLEN - ETH_HLEN;

        if (size >= min_size)
                return size;
        return min_size;
}

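/* Build and send one supervision frame: an Ethernet header addressed to the
 * HSR supervision multicast address, a supervision tag carrying path 0xf, the
 * current sequence number and the given TLV type, and a payload with this
 * master's MAC address (MacAddressA). hsr_pad() makes sure enough space is
 * allocated for a minimum-size Ethernet frame.
 */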
static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type)
{
        struct hsr_priv *hsr;
        struct sk_buff *skb;
        int hlen, tlen;
        struct hsr_sup_tag *hsr_stag;
        struct hsr_sup_payload *hsr_sp;
        unsigned long irqflags;

        hlen = LL_RESERVED_SPACE(hsr_dev);
        tlen = hsr_dev->needed_tailroom;
        skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen,
                        GFP_ATOMIC);

        if (skb == NULL)
                return;

        hsr = netdev_priv(hsr_dev);

        skb_reserve(skb, hlen);

        skb->dev = hsr_dev;
        skb->protocol = htons(ETH_P_PRP);
        skb->priority = TC_PRIO_CONTROL;

        if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
                            hsr->sup_multicast_addr,
                            skb->dev->dev_addr, skb->len) < 0)
                goto out;

        skb_pull(skb, sizeof(struct ethhdr));
        hsr_stag = (typeof(hsr_stag)) skb->data;

        set_hsr_stag_path(hsr_stag, 0xf);
        set_hsr_stag_HSR_Ver(hsr_stag, 0);

        spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
        hsr_stag->sequence_nr = htons(hsr->sequence_nr);
        hsr->sequence_nr++;
        spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);

        hsr_stag->HSR_TLV_Type = type;
        hsr_stag->HSR_TLV_Length = 12;

        skb_push(skb, sizeof(struct ethhdr));

        /* Payload: MacAddressA */
        hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
        ether_addr_copy(hsr_sp->MacAddressA, hsr_dev->dev_addr);

        dev_queue_xmit(skb);
        return;

out:
        kfree_skb(skb);
}


/* Announce (supervision frame) timer function
 */
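/* The first three expiries send HSR_TLV_ANNOUNCE frames at
 * HSR_ANNOUNCE_INTERVAL; after that, HSR_TLV_LIFE_CHECK frames are sent at
 * HSR_LIFE_CHECK_INTERVAL. The timer is only re-armed while the master is
 * administratively up.
 */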
static void hsr_announce(unsigned long data)
{
        struct hsr_priv *hsr;

        hsr = (struct hsr_priv *) data;

        if (hsr->announce_count < 3) {
                send_hsr_supervision_frame(hsr->dev, HSR_TLV_ANNOUNCE);
                hsr->announce_count++;
        } else {
                send_hsr_supervision_frame(hsr->dev, HSR_TLV_LIFE_CHECK);
        }

        if (hsr->announce_count < 3)
                hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
        else
                hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);

        if (is_admin_up(hsr->dev))
                add_timer(&hsr->announce_timer);
}


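/* Undo what hsr_dev_finalize() did to the slaves: drop the extra promiscuity
 * reference and unregister the rx handler. Called both when the master is
 * destroyed and on errors during setup.
 */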
static void restore_slaves(struct net_device *hsr_dev)
{
        struct hsr_priv *hsr;
        int i;
        int res;

        hsr = netdev_priv(hsr_dev);

        rtnl_lock();

        for (i = 0; i < HSR_MAX_SLAVE; i++) {
                if (!hsr->slave[i])
                        continue;
                res = dev_set_promiscuity(hsr->slave[i], -1);
                if (res)
                        netdev_info(hsr_dev,
                                    "Cannot restore slave promiscuity (%s, %d)\n",
                                    hsr->slave[i]->name, res);

                if (hsr->slave[i]->rx_handler == hsr_handle_frame)
                        netdev_rx_handler_unregister(hsr->slave[i]);
        }

        rtnl_unlock();
}

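/* Deferred free: unregister_hsr_master() removes the hsr_priv from an
 * RCU-protected list, so the hsr_priv (embedded in the net_device's private
 * area) must not be freed until a grace period has passed; hsr_dev_destroy()
 * arranges that via call_rcu().
 */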
static void reclaim_hsr_dev(struct rcu_head *rh)
{
        struct hsr_priv *hsr;

        hsr = container_of(rh, struct hsr_priv, rcu_head);
        free_netdev(hsr->dev);
}


/* According to comments in the declaration of struct net_device, this function
 * is "Called from unregister, can be used to call free_netdev". Ok then...
 */
static void hsr_dev_destroy(struct net_device *hsr_dev)
{
        struct hsr_priv *hsr;

        hsr = netdev_priv(hsr_dev);

        del_timer_sync(&hsr->prune_timer);
        del_timer_sync(&hsr->announce_timer);
        unregister_hsr_master(hsr); /* calls list_del_rcu on hsr */
        restore_slaves(hsr_dev);
        call_rcu(&hsr->rcu_head, reclaim_hsr_dev); /* reclaim hsr */
}

static const struct net_device_ops hsr_device_ops = {
        .ndo_change_mtu = hsr_dev_change_mtu,
        .ndo_open = hsr_dev_open,
        .ndo_stop = hsr_dev_close,
        .ndo_start_xmit = hsr_dev_xmit,
};


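/* hsr_dev_setup() is used as the rtnl_link_ops ->setup callback (see
 * hsr_netlink.c), i.e. it runs when an HSR master is created from userspace,
 * typically with something like:
 *
 *   ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 supervision 45
 *
 * (iproute2 syntax as documented in ip-link(8); the interface names and the
 * supervision byte are just examples.)
 */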
void hsr_dev_setup(struct net_device *dev)
{
        random_ether_addr(dev->dev_addr);

        ether_setup(dev);
        dev->header_ops = &hsr_header_ops;
        dev->netdev_ops = &hsr_device_ops;
        dev->tx_queue_len = 0;

        dev->destructor = hsr_dev_destroy;
}


/* Return true if dev is a HSR master; return false otherwise.
 */
bool is_hsr_master(struct net_device *dev)
{
        return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
}

static int check_slave_ok(struct net_device *dev)
{
        /* Don't allow HSR on non-ethernet like devices */
        if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) ||
            (dev->addr_len != ETH_ALEN)) {
                netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
                return -EINVAL;
        }

        /* Don't allow enslaving hsr devices */
        if (is_hsr_master(dev)) {
                netdev_info(dev, "Cannot create trees of HSR devices.\n");
                return -EINVAL;
        }

        if (is_hsr_slave(dev)) {
                netdev_info(dev, "This device is already a HSR slave.\n");
                return -EINVAL;
        }

        if (dev->priv_flags & IFF_802_1Q_VLAN) {
                netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
                return -EINVAL;
        }

        /* HSR over bonded devices has not been tested, but I'm not sure it
         * won't work...
         */

        return 0;
}


/* Default multicast address for HSR Supervision frames */
static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
        0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};
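/* The last octet of the address above is overwritten per device with the
 * multicast_spec value passed to hsr_dev_finalize(), so the supervision
 * address actually used is 01:15:4e:00:01:XX.
 */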

int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec)
{
        struct hsr_priv *hsr;
        int i;
        int res;

        hsr = netdev_priv(hsr_dev);
        hsr->dev = hsr_dev;
        INIT_LIST_HEAD(&hsr->node_db);
        INIT_LIST_HEAD(&hsr->self_node_db);
        for (i = 0; i < HSR_MAX_SLAVE; i++)
                hsr->slave[i] = slave[i];

        spin_lock_init(&hsr->seqnr_lock);
        /* Overflow soon to find bugs easier: */
        hsr->sequence_nr = USHRT_MAX - 1024;

        init_timer(&hsr->announce_timer);
        hsr->announce_timer.function = hsr_announce;
        hsr->announce_timer.data = (unsigned long) hsr;

        init_timer(&hsr->prune_timer);
        hsr->prune_timer.function = hsr_prune_nodes;
        hsr->prune_timer.data = (unsigned long) hsr;

        ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
        hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;

/* FIXME: should I modify the value of these?
 *
 * - hsr_dev->flags - i.e.
 *                      IFF_MASTER/SLAVE?
 * - hsr_dev->priv_flags - i.e.
 *                      IFF_EBRIDGE?
 *                      IFF_TX_SKB_SHARING?
 *                      IFF_HSR_MASTER/SLAVE?
 */

        for (i = 0; i < HSR_MAX_SLAVE; i++) {
                res = check_slave_ok(slave[i]);
                if (res)
                        return res;
        }

        hsr_dev->features = slave[0]->features & slave[1]->features;
        /* Prevent recursive tx locking */
        hsr_dev->features |= NETIF_F_LLTX;
        /* VLAN on top of HSR needs testing and probably some work on
         * hsr_header_create() etc.
         */
        hsr_dev->features |= NETIF_F_VLAN_CHALLENGED;

        /* Set hsr_dev's MAC address to that of mac_slave1 */
        ether_addr_copy(hsr_dev->dev_addr, hsr->slave[0]->dev_addr);

        /* Set required header length */
        for (i = 0; i < HSR_MAX_SLAVE; i++) {
                if (slave[i]->hard_header_len + HSR_HLEN >
                                                hsr_dev->hard_header_len)
                        hsr_dev->hard_header_len =
                                slave[i]->hard_header_len + HSR_HLEN;
        }

        /* MTU */
        for (i = 0; i < HSR_MAX_SLAVE; i++)
                if (slave[i]->mtu - HSR_HLEN < hsr_dev->mtu)
                        hsr_dev->mtu = slave[i]->mtu - HSR_HLEN;

        /* Make sure the 1st call to netif_carrier_on() gets through */
        netif_carrier_off(hsr_dev);

        /* Promiscuity */
        for (i = 0; i < HSR_MAX_SLAVE; i++) {
                res = dev_set_promiscuity(slave[i], 1);
                if (res) {
                        netdev_info(hsr_dev, "Cannot set slave promiscuity (%s, %d)\n",
                                    slave[i]->name, res);
                        goto fail;
                }
        }

        for (i = 0; i < HSR_MAX_SLAVE; i++) {
                res = netdev_rx_handler_register(slave[i], hsr_handle_frame,
                                                 hsr);
                if (res)
                        goto fail;
        }

        /* Make sure we recognize frames from ourselves in hsr_rcv() */
        res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr,
                                   hsr->slave[1]->dev_addr);
        if (res < 0)
                goto fail;

        res = register_netdevice(hsr_dev);
        if (res)
                goto fail;

        hsr->prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
        add_timer(&hsr->prune_timer);

        register_hsr_master(hsr);

        return 0;

fail:
        restore_slaves(hsr_dev);
        return res;
}