blob: 55adb4dbd235fdccbe1b756cc1dc1ae5cd6a1535 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame router for HSR and PRP.
 */
9
10#include "hsr_forward.h"
11#include <linux/types.h>
12#include <linux/skbuff.h>
13#include <linux/etherdevice.h>
14#include <linux/if_vlan.h>
15#include "hsr_main.h"
16#include "hsr_framereg.h"
17
struct hsr_node;

/* Per-frame forwarding state, filled in by hsr_fill_frame_info() and
 * consumed by hsr_forward_do(). Exactly one of skb_std/skb_hsr is set on
 * ingress; the other may be created lazily while forwarding.
 */
struct hsr_frame_info {
	struct sk_buff *skb_std;	/* Frame without HSR tag (or NULL) */
	struct sk_buff *skb_hsr;	/* HSR-tagged frame (or NULL) */
	struct hsr_port *port_rcv;	/* Port the frame was received on */
	struct hsr_node *node_src;	/* Node-table entry for the sender */
	u16 sequence_nr;		/* Sequence nr used for dup discard */
	bool is_supervision;		/* HSR/PRP supervision frame? */
	bool is_vlan;			/* Carries an 802.1Q header? */
	bool is_local_dest;		/* Deliver to the master port? */
	bool is_local_exclusive;	/* Addressed to us only (unicast)? */
};
31
Arvid Brodinf266a682014-07-04 23:41:03 +020032/* The uses I can see for these HSR supervision frames are:
33 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
34 * 22") to reset any sequence_nr counters belonging to that node. Useful if
35 * the other node's counter has been reset for some reason.
36 * --
37 * Or not - resetting the counter and bridging the frame would create a
38 * loop, unfortunately.
39 *
40 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
41 * frame is received from a particular node, we know something is wrong.
42 * We just register these (as with normal frames) and throw them away.
43 *
44 * 3) Allow different MAC addresses for the two slave interfaces, using the
45 * MacAddressA field.
46 */
47static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
48{
Murali Karicherib1b4aa92019-04-05 13:31:32 -040049 struct ethhdr *eth_hdr;
50 struct hsr_sup_tag *hsr_sup_tag;
51 struct hsrv1_ethhdr_sp *hsr_V1_hdr;
Arvid Brodinf266a682014-07-04 23:41:03 +020052
53 WARN_ON_ONCE(!skb_mac_header_was_set(skb));
Murali Karicherib1b4aa92019-04-05 13:31:32 -040054 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
Arvid Brodinf266a682014-07-04 23:41:03 +020055
Peter Heiseee1c2792016-04-13 13:52:22 +020056 /* Correct addr? */
Murali Karicherib1b4aa92019-04-05 13:31:32 -040057 if (!ether_addr_equal(eth_hdr->h_dest,
Arvid Brodinf266a682014-07-04 23:41:03 +020058 hsr->sup_multicast_addr))
59 return false;
60
Peter Heiseee1c2792016-04-13 13:52:22 +020061 /* Correct ether type?. */
Murali Karicherib1b4aa92019-04-05 13:31:32 -040062 if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
63 eth_hdr->h_proto == htons(ETH_P_HSR)))
Arvid Brodinf266a682014-07-04 23:41:03 +020064 return false;
Peter Heiseee1c2792016-04-13 13:52:22 +020065
66 /* Get the supervision header from correct location. */
Murali Karicherib1b4aa92019-04-05 13:31:32 -040067 if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
68 hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
69 if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
Peter Heiseee1c2792016-04-13 13:52:22 +020070 return false;
71
Murali Karicherib1b4aa92019-04-05 13:31:32 -040072 hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
Peter Heiseee1c2792016-04-13 13:52:22 +020073 } else {
Murali Karicherib1b4aa92019-04-05 13:31:32 -040074 hsr_sup_tag =
Murali Karicheri5fa96772019-04-05 13:31:29 -040075 &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
Peter Heiseee1c2792016-04-13 13:52:22 +020076 }
77
Murali Karicherib1b4aa92019-04-05 13:31:32 -040078 if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
79 hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK)
Arvid Brodinf266a682014-07-04 23:41:03 +020080 return false;
Murali Karicherib1b4aa92019-04-05 13:31:32 -040081 if (hsr_sup_tag->HSR_TLV_length != 12 &&
82 hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload))
Arvid Brodinf266a682014-07-04 23:41:03 +020083 return false;
84
85 return true;
86}
87
Arvid Brodinf266a682014-07-04 23:41:03 +020088static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
89 struct hsr_frame_info *frame)
90{
91 struct sk_buff *skb;
92 int copylen;
93 unsigned char *dst, *src;
94
95 skb_pull(skb_in, HSR_HLEN);
96 skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
97 skb_push(skb_in, HSR_HLEN);
Murali Karicheri05ca6e62019-04-05 13:31:28 -040098 if (!skb)
Arvid Brodinf266a682014-07-04 23:41:03 +020099 return NULL;
100
101 skb_reset_mac_header(skb);
102
103 if (skb->ip_summed == CHECKSUM_PARTIAL)
104 skb->csum_start -= HSR_HLEN;
105
Murali Karicherid131fcc2019-04-05 13:31:31 -0400106 copylen = 2 * ETH_ALEN;
Arvid Brodinf266a682014-07-04 23:41:03 +0200107 if (frame->is_vlan)
108 copylen += VLAN_HLEN;
109 src = skb_mac_header(skb_in);
110 dst = skb_mac_header(skb);
111 memcpy(dst, src, copylen);
112
113 skb->protocol = eth_hdr(skb)->h_proto;
114 return skb;
115}
116
117static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
118 struct hsr_port *port)
119{
120 if (!frame->skb_std)
121 frame->skb_std = create_stripped_skb(frame->skb_hsr, frame);
122 return skb_clone(frame->skb_std, GFP_ATOMIC);
123}
124
Murali Karicheri5d935182020-07-20 12:43:27 -0400125static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
126 struct hsr_frame_info *frame,
127 struct hsr_port *port, u8 proto_version)
Arvid Brodinf266a682014-07-04 23:41:03 +0200128{
129 struct hsr_ethhdr *hsr_ethhdr;
130 int lane_id;
131 int lsdu_size;
132
Murali Karicheri6d6148bc2020-07-17 10:55:09 -0400133 /* pad to minimum packet size which is 60 + 6 (HSR tag) */
Murali Karicheri5d935182020-07-20 12:43:27 -0400134 if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
135 return NULL;
Murali Karicheri6d6148bc2020-07-17 10:55:09 -0400136
Arvid Brodinf266a682014-07-04 23:41:03 +0200137 if (port->type == HSR_PT_SLAVE_A)
138 lane_id = 0;
139 else
140 lane_id = 1;
141
142 lsdu_size = skb->len - 14;
143 if (frame->is_vlan)
144 lsdu_size -= 4;
145
Murali Karicheri5fa96772019-04-05 13:31:29 -0400146 hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
Arvid Brodinf266a682014-07-04 23:41:03 +0200147
148 set_hsr_tag_path(&hsr_ethhdr->hsr_tag, lane_id);
149 set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
150 hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
151 hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
Murali Karicherib1b4aa92019-04-05 13:31:32 -0400152 hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
Peter Heiseee1c2792016-04-13 13:52:22 +0200153 ETH_P_HSR : ETH_P_PRP);
Murali Karicheri5d935182020-07-20 12:43:27 -0400154
155 return skb;
Arvid Brodinf266a682014-07-04 23:41:03 +0200156}
157
158static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
159 struct hsr_frame_info *frame,
160 struct hsr_port *port)
161{
162 int movelen;
163 unsigned char *dst, *src;
164 struct sk_buff *skb;
165
166 /* Create the new skb with enough headroom to fit the HSR tag */
167 skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC);
Murali Karicheri05ca6e62019-04-05 13:31:28 -0400168 if (!skb)
Arvid Brodinf266a682014-07-04 23:41:03 +0200169 return NULL;
170 skb_reset_mac_header(skb);
171
172 if (skb->ip_summed == CHECKSUM_PARTIAL)
173 skb->csum_start += HSR_HLEN;
174
175 movelen = ETH_HLEN;
176 if (frame->is_vlan)
177 movelen += VLAN_HLEN;
178
179 src = skb_mac_header(skb);
180 dst = skb_push(skb, HSR_HLEN);
181 memmove(dst, src, movelen);
182 skb_reset_mac_header(skb);
183
Murali Karicheri5d935182020-07-20 12:43:27 -0400184 /* skb_put_padto free skb on error and hsr_fill_tag returns NULL in
185 * that case
186 */
187 return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
Arvid Brodinf266a682014-07-04 23:41:03 +0200188}
189
190/* If the original frame was an HSR tagged frame, just clone it to be sent
191 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
192 */
193static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame,
194 struct hsr_port *port)
195{
196 if (frame->skb_hsr)
197 return skb_clone(frame->skb_hsr, GFP_ATOMIC);
198
Murali Karicheri56703422019-04-05 13:31:25 -0400199 if (port->type != HSR_PT_SLAVE_A && port->type != HSR_PT_SLAVE_B) {
Arvid Brodinf266a682014-07-04 23:41:03 +0200200 WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port");
201 return NULL;
202 }
203
204 return create_tagged_skb(frame->skb_std, frame, port);
205}
206
Arvid Brodinf266a682014-07-04 23:41:03 +0200207static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
208 struct hsr_node *node_src)
209{
210 bool was_multicast_frame;
211 int res;
212
213 was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
214 hsr_addr_subst_source(node_src, skb);
215 skb_pull(skb, ETH_HLEN);
216 res = netif_rx(skb);
217 if (res == NET_RX_DROP) {
218 dev->stats.rx_dropped++;
219 } else {
220 dev->stats.rx_packets++;
221 dev->stats.rx_bytes += skb->len;
222 if (was_multicast_frame)
223 dev->stats.multicast++;
224 }
225}
226
227static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
228 struct hsr_frame_info *frame)
229{
230 if (frame->port_rcv->type == HSR_PT_MASTER) {
231 hsr_addr_subst_dest(frame->node_src, skb, port);
232
233 /* Address substitution (IEC62439-3 pp 26, 50): replace mac
234 * address of outgoing frame with that of the outgoing slave's.
235 */
236 ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
237 }
238 return dev_queue_xmit(skb);
239}
240
/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's a HSR frame: through a device where it has passed before
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert a HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * hsr_register_frame_out() returns non-zero when this
		 * sequence_nr was already forwarded on this port
		 * (duplicate discard).
		 */
		if (hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		/* Supervision frames are consumed by the supervision
		 * handler instead of being delivered up the stack.
		 */
		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame->skb_hsr,
					     frame->node_src,
					     frame->port_rcv);
			continue;
		}

		/* Ring ports get a tagged copy; the master port gets the
		 * frame with the HSR tag stripped.
		 */
		if (port->type != HSR_PT_MASTER)
			skb = frame_get_tagged_skb(frame, port);
		else
			skb = frame_get_stripped_skb(frame, port);
		if (!skb) {
			/* FIXME: Record the dropped frame? */
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER)
			hsr_deliver_master(skb, port->dev, frame->node_src);
		else
			hsr_xmit(skb, port, frame);
	}
}
298
Arvid Brodinf266a682014-07-04 23:41:03 +0200299static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
300 struct hsr_frame_info *frame)
301{
Arvid Brodinf266a682014-07-04 23:41:03 +0200302 if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
303 frame->is_local_exclusive = true;
304 skb->pkt_type = PACKET_HOST;
305 } else {
306 frame->is_local_exclusive = false;
307 }
308
Murali Karicheri56703422019-04-05 13:31:25 -0400309 if (skb->pkt_type == PACKET_HOST ||
310 skb->pkt_type == PACKET_MULTICAST ||
311 skb->pkt_type == PACKET_BROADCAST) {
Arvid Brodinf266a682014-07-04 23:41:03 +0200312 frame->is_local_dest = true;
313 } else {
314 frame->is_local_dest = false;
315 }
316}
317
/* Fill in the per-frame forwarding descriptor from a received skb.
 * Returns 0 on success, -1 when the source node is unknown (and the frame
 * is not a supervision frame) or a node entry could not be allocated.
 */
static int hsr_fill_frame_info(struct hsr_frame_info *frame,
			       struct sk_buff *skb, struct hsr_port *port)
{
	struct ethhdr *ethhdr;
	unsigned long irqflags;

	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
		frame->is_vlan = true;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}
	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		/* Already tagged: reuse the sender's sequence number for
		 * duplicate discard.
		 */
		frame->skb_std = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
	} else {
		/* Untagged (locally originated) frame: allocate the next */
		/* Sequence nr for the master node */
		frame->skb_std = skb;
		frame->skb_hsr = NULL;
		spin_lock_irqsave(&port->hsr->seqnr_lock, irqflags);
		frame->sequence_nr = port->hsr->sequence_nr;
		port->hsr->sequence_nr++;
		spin_unlock_irqrestore(&port->hsr->seqnr_lock, irqflags);
	}

	frame->port_rcv = port;
	check_local_dest(port->hsr, skb, frame);

	return 0;
}
356
/* Must be called holding rcu read lock (because of the port parameter).
 * Entry point for frame forwarding: builds the frame descriptor, registers
 * the frame with the node table, forwards it to all eligible ports, and
 * finally releases the original skb (hsr_forward_do() only sends clones).
 */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	/* All parsing below assumes the Ethernet header is at skb->data */
	if (skb_mac_header(skb) != skb->data) {
		WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
			  __FILE__, __LINE__, port->dev->name);
		goto out_drop;
	}

	if (hsr_fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;
	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	/* Drop our references; kfree_skb(NULL) is a no-op, so it is safe
	 * even though only one (or, after lazy stripping, both) is set.
	 */
	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}