/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 */

#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

struct hsr_node;

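/* Bookkeeping for one received frame while it is being forwarded: the
 * standard and/or HSR-tagged version of the skb, the receiving port, the
 * originating node and sequence number, and flags describing how the frame
 * should be handled.
 */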
struct hsr_frame_info {
	struct sk_buff *skb_std;
	struct sk_buff *skb_hsr;
	struct hsr_port *port_rcv;
	struct hsr_node *node_src;
	u16 sequence_nr;
	bool is_supervision;
	bool is_vlan;
	bool is_local_dest;
	bool is_local_exclusive;
};

/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *ethHdr;
	struct hsr_sup_tag *hsrSupTag;
	struct hsrv1_ethhdr_sp *hsrV1Hdr;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	ethHdr = (struct ethhdr *) skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(ethHdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(ethHdr->h_proto == htons(ETH_P_PRP)
			|| ethHdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (ethHdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		hsrV1Hdr = (struct hsrv1_ethhdr_sp *) skb_mac_header(skb);
		if (hsrV1Hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsrSupTag = &hsrV1Hdr->hsr_sup;
	} else {
		hsrSupTag =
		     &((struct hsrv0_ethhdr_sp *) skb_mac_header(skb))->hsr_sup;
	}

	if (hsrSupTag->HSR_TLV_Type != HSR_TLV_ANNOUNCE &&
	    hsrSupTag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK)
		return false;
	if (hsrSupTag->HSR_TLV_Length != 12 &&
	    hsrSupTag->HSR_TLV_Length != sizeof(struct hsr_sup_payload))
		return false;

	return true;
}

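/* Create a copy of skb_in with the HSR tag removed, so it can be passed on
 * as a standard (non-HSR) Ethernet frame. Returns NULL if the copy cannot
 * be allocated.
 */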
static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
					   struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (skb == NULL)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	copylen = 2*ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}

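/* Return a clone of the untagged version of the frame, creating and caching
 * it in frame->skb_std on first use.
 */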
static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
					      struct hsr_port *port)
{
	if (!frame->skb_std)
		frame->skb_std = create_stripped_skb(frame->skb_hsr, frame);
	if (!frame->skb_std)
		return NULL;	/* create_stripped_skb() ran out of memory */
	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

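/* Write the HSR tag of an already-expanded frame: path (lane) id, LSDU size
 * and sequence number, and move the original EtherType into the tag's
 * encapsulated protocol field.
 */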
static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
			 struct hsr_port *port, u8 protoVersion)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lane_id;
	int lsdu_size;

	if (port->type == HSR_PT_SLAVE_A)
		lane_id = 0;
	else
		lane_id = 1;

	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;

	hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);

	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, lane_id);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(protoVersion ?
			ETH_P_HSR : ETH_P_PRP);
}

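/* Create a copy of skb_o with an HSR tag inserted after the Ethernet (and
 * VLAN, if present) header. Returns NULL if the copy cannot be allocated.
 */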
static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
					 struct hsr_frame_info *frame,
					 struct hsr_port *port)
{
	int movelen;
	unsigned char *dst, *src;
	struct sk_buff *skb;

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	hsr_fill_tag(skb, frame, port, port->hsr->protVersion);

	return skb;
}

/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame,
					    struct hsr_port *port)
{
	if (frame->skb_hsr)
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);

	if (port->type != HSR_PT_SLAVE_A && port->type != HSR_PT_SLAVE_B) {
		WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port");
		return NULL;
	}

	return create_tagged_skb(frame->skb_std, frame, port);
}

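/* Deliver a frame up the stack via the HSR master device: substitute the
 * source address, strip the Ethernet header and hand the skb to netif_rx(),
 * updating the master device's rx statistics accordingly.
 */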
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	unsigned int skb_len;
	int res;

	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	/* netif_rx() takes ownership of the skb, so record its length first */
	skb_len = skb->len;
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb_len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}

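/* Transmit the frame on the given slave port. For frames originating from
 * the master (i.e. sent by the local host), the destination and source MAC
 * addresses are first adjusted for the outgoing slave.
 */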
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace mac
		 * address of outgoing frame with that of the outgoing slave's.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}

/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's a HSR frame: through a device where it has passed before
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert a HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* Don't send frame over port where it has been sent before */
		if (hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame->skb_hsr,
					     frame->node_src,
					     frame->port_rcv);
			continue;
		}

		if (port->type != HSR_PT_MASTER)
			skb = frame_get_tagged_skb(frame, port);
		else
			skb = frame_get_stripped_skb(frame, port);
		if (skb == NULL) {
			/* FIXME: Record the dropped frame? */
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER)
			hsr_deliver_master(skb, port->dev, frame->node_src);
		else
			hsr_xmit(skb, port, frame);
	}
}

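/* Decide whether the frame should be delivered to the local host: set
 * is_local_exclusive if the destination is one of our own addresses, and
 * is_local_dest if the frame is addressed to us, multicast or broadcast.
 */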
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
		frame->is_local_exclusive = true;
		skb->pkt_type = PACKET_HOST;
	} else {
		frame->is_local_exclusive = false;
	}

	if (skb->pkt_type == PACKET_HOST ||
	    skb->pkt_type == PACKET_MULTICAST ||
	    skb->pkt_type == PACKET_BROADCAST) {
		frame->is_local_dest = true;
	} else {
		frame->is_local_dest = false;
	}
}

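/* Fill in the hsr_frame_info for a newly received frame: look up (or create)
 * the source node, detect VLAN and supervision frames, and pick the sequence
 * number either from the HSR tag or, for untagged frames from the master,
 * from the local counter. Returns -1 if the source node cannot be resolved.
 */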
static int hsr_fill_frame_info(struct hsr_frame_info *frame,
			       struct sk_buff *skb, struct hsr_port *port)
{
	struct ethhdr *ethhdr;
	unsigned long irqflags;

	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
	if (frame->node_src == NULL)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *) skb_mac_header(skb);
	frame->is_vlan = false;
	if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
		frame->is_vlan = true;
		/* FIXME: */
		WARN_ONCE(1, "HSR: VLAN not yet supported");
	}
	if (ethhdr->h_proto == htons(ETH_P_PRP)
			|| ethhdr->h_proto == htons(ETH_P_HSR)) {
		frame->skb_std = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
	} else {
		frame->skb_std = skb;
		frame->skb_hsr = NULL;
		/* Sequence nr for the master node */
		spin_lock_irqsave(&port->hsr->seqnr_lock, irqflags);
		frame->sequence_nr = port->hsr->sequence_nr;
		port->hsr->sequence_nr++;
		spin_unlock_irqrestore(&port->hsr->seqnr_lock, irqflags);
	}

	frame->port_rcv = port;
	check_local_dest(port->hsr, skb, frame);

	return 0;
}

/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (skb_mac_header(skb) != skb->data) {
		WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
			  __FILE__, __LINE__, port->dev->name);
		goto out_drop;
	}

	if (hsr_fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;
	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);

	if (frame.skb_hsr != NULL)
		kfree_skb(frame.skb_hsr);
	if (frame.skb_std != NULL)
		kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}