blob: e59cbb4f0cd15e343592a0303461d639fd5babd6 [file] [log] [blame]
Murali Karicheri0e7623b2019-04-05 13:31:34 -04001// SPDX-License-Identifier: GPL-2.0
Arvid Brodinf266a682014-07-04 23:41:03 +02002/* Copyright 2011-2014 Autronica Fire and Security AS
3 *
Arvid Brodinf266a682014-07-04 23:41:03 +02004 * Author(s):
5 * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
Murali Karicheri8f4c0e02020-07-22 10:40:16 -04006 *
7 * Frame router for HSR and PRP.
Arvid Brodinf266a682014-07-04 23:41:03 +02008 */
9
10#include "hsr_forward.h"
11#include <linux/types.h>
12#include <linux/skbuff.h>
13#include <linux/etherdevice.h>
14#include <linux/if_vlan.h>
15#include "hsr_main.h"
16#include "hsr_framereg.h"
17
Arvid Brodinf266a682014-07-04 23:41:03 +020018struct hsr_node;
19
Arvid Brodinf266a682014-07-04 23:41:03 +020020/* The uses I can see for these HSR supervision frames are:
21 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
22 * 22") to reset any sequence_nr counters belonging to that node. Useful if
23 * the other node's counter has been reset for some reason.
24 * --
25 * Or not - resetting the counter and bridging the frame would create a
26 * loop, unfortunately.
27 *
28 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
29 * frame is received from a particular node, we know something is wrong.
30 * We just register these (as with normal frames) and throw them away.
31 *
32 * 3) Allow different MAC addresses for the two slave interfaces, using the
33 * MacAddressA field.
34 */
/* Return true if 'skb' is a valid HSR/PRP supervision frame.
 *
 * Checks the destination MAC against the supervision multicast address,
 * the ethertype (ETH_P_HSR for HSRv1, ETH_P_PRP for HSRv0), and walks the
 * supervision TLV chain.  Calls pskb_may_pull(), which may reallocate the
 * skb header - callers must not hold header pointers across this call.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;
	struct hsr_sup_tlv *hsr_sup_tlv;
	u16 total_length = 0;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type?. */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		total_length = sizeof(struct hsrv1_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		/* HSRv1 supervision frames carry the PRP ethertype after
		 * the HSR tag.
		 */
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		total_length = sizeof(struct hsrv0_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_sup_tag =
		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	/* NOTE(review): 12 is accepted besides sizeof(struct hsr_sup_payload)
	 * - presumably a payload length used by other implementations; verify
	 * against IEC 62439-3.
	 */
	if (hsr_sup_tag->tlv.HSR_TLV_length != 12 &&
	    hsr_sup_tag->tlv.HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	/* Get next tlv */
	total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
	if (!pskb_may_pull(skb, total_length))
		return false;
	/* pull/push pair: temporarily advance skb->data to point at the next
	 * TLV, take the pointer, then restore the original data offset.
	 */
	skb_pull(skb, total_length);
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	skb_push(skb, total_length);

	/* if this is a redbox supervision frame we need to verify
	 * that more data is available
	 */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* tlv length must be a length of a mac address */
		if (hsr_sup_tlv->HSR_TLV_length != sizeof(struct hsr_sup_payload))
			return false;

		/* make sure another tlv follows */
		total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tlv->HSR_TLV_length;
		if (!pskb_may_pull(skb, total_length))
			return false;

		/* get next tlv */
		skb_pull(skb, total_length);
		hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
		skb_push(skb, total_length);
	}

	/* end of tlvs must follow at the end */
	if (hsr_sup_tlv->HSR_TLV_type == HSR_TLV_EOT &&
	    hsr_sup_tlv->HSR_TLV_length != 0)
		return false;

	return true;
}
119
/* Return a copy of 'skb_in' with the HSR tag (HSR_HLEN bytes) removed.
 *
 * The addresses (and VLAN tag, if present) are copied in front of the
 * payload so the result is a plain Ethernet frame.  'skb_in' itself is
 * left unmodified.  Returns NULL on allocation failure.
 */
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	/* Temporarily pull the tag so __pskb_copy() copies the frame without
	 * it; skb_in's data offset is restored immediately afterwards.
	 */
	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	/* Checksum start moved down by the size of the stripped tag */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	/* Re-copy dest+src addresses (and VLAN header, if any) from the
	 * original, overwriting the gap left by the tag.
	 */
	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}
148
Murali Karicherifa4dc892020-07-22 10:40:20 -0400149struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
150 struct hsr_port *port)
Arvid Brodinf266a682014-07-04 23:41:03 +0200151{
Murali Karicheri451d8122020-07-22 10:40:21 -0400152 if (!frame->skb_std) {
153 if (frame->skb_hsr) {
154 frame->skb_std =
155 create_stripped_skb_hsr(frame->skb_hsr, frame);
156 } else {
157 /* Unexpected */
158 WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
159 __FILE__, __LINE__, port->dev->name);
160 return NULL;
161 }
162 }
163
Arvid Brodinf266a682014-07-04 23:41:03 +0200164 return skb_clone(frame->skb_std, GFP_ATOMIC);
165}
166
Murali Karicheri451d8122020-07-22 10:40:21 -0400167struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
168 struct hsr_port *port)
169{
170 if (!frame->skb_std) {
171 if (frame->skb_prp) {
172 /* trim the skb by len - HSR_HLEN to exclude RCT */
173 skb_trim(frame->skb_prp,
174 frame->skb_prp->len - HSR_HLEN);
175 frame->skb_std =
176 __pskb_copy(frame->skb_prp,
177 skb_headroom(frame->skb_prp),
178 GFP_ATOMIC);
179 } else {
180 /* Unexpected */
181 WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
182 __FILE__, __LINE__, port->dev->name);
183 return NULL;
184 }
185 }
186
187 return skb_clone(frame->skb_std, GFP_ATOMIC);
188}
189
190static void prp_set_lan_id(struct prp_rct *trailer,
191 struct hsr_port *port)
192{
193 int lane_id;
194
195 if (port->type == HSR_PT_SLAVE_A)
196 lane_id = 0;
197 else
198 lane_id = 1;
199
200 /* Add net_id in the upper 3 bits of lane_id */
201 lane_id |= port->hsr->net_id;
202 set_prp_lan_id(trailer, lane_id);
203}
204
205/* Tailroom for PRP rct should have been created before calling this */
206static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
207 struct hsr_frame_info *frame,
208 struct hsr_port *port)
209{
210 struct prp_rct *trailer;
211 int min_size = ETH_ZLEN;
212 int lsdu_size;
213
214 if (!skb)
215 return skb;
216
217 if (frame->is_vlan)
218 min_size = VLAN_ETH_ZLEN;
219
220 if (skb_put_padto(skb, min_size))
221 return NULL;
222
223 trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
224 lsdu_size = skb->len - 14;
225 if (frame->is_vlan)
226 lsdu_size -= 4;
227 prp_set_lan_id(trailer, port);
228 set_prp_LSDU_size(trailer, lsdu_size);
229 trailer->sequence_nr = htons(frame->sequence_nr);
230 trailer->PRP_suffix = htons(ETH_P_PRP);
George McCollister78be9212021-02-09 19:02:10 -0600231 skb->protocol = eth_hdr(skb)->h_proto;
Murali Karicheri451d8122020-07-22 10:40:21 -0400232
233 return skb;
234}
235
236static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
237 struct hsr_port *port)
238{
239 int path_id;
240
241 if (port->type == HSR_PT_SLAVE_A)
242 path_id = 0;
243 else
244 path_id = 1;
245
246 set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
247}
248
Murali Karicheri5d935182020-07-20 12:43:27 -0400249static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
250 struct hsr_frame_info *frame,
251 struct hsr_port *port, u8 proto_version)
Arvid Brodinf266a682014-07-04 23:41:03 +0200252{
253 struct hsr_ethhdr *hsr_ethhdr;
Arvid Brodinf266a682014-07-04 23:41:03 +0200254 int lsdu_size;
255
Murali Karicheri6d6148bc2020-07-17 10:55:09 -0400256 /* pad to minimum packet size which is 60 + 6 (HSR tag) */
Murali Karicheri5d935182020-07-20 12:43:27 -0400257 if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
258 return NULL;
Murali Karicheri6d6148bc2020-07-17 10:55:09 -0400259
Arvid Brodinf266a682014-07-04 23:41:03 +0200260 lsdu_size = skb->len - 14;
261 if (frame->is_vlan)
262 lsdu_size -= 4;
263
Murali Karicheri5fa96772019-04-05 13:31:29 -0400264 hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
Arvid Brodinf266a682014-07-04 23:41:03 +0200265
Murali Karicheri451d8122020-07-22 10:40:21 -0400266 hsr_set_path_id(hsr_ethhdr, port);
Arvid Brodinf266a682014-07-04 23:41:03 +0200267 set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
268 hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
269 hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
Murali Karicherib1b4aa92019-04-05 13:31:32 -0400270 hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
Peter Heiseee1c2792016-04-13 13:52:22 +0200271 ETH_P_HSR : ETH_P_PRP);
George McCollister78be9212021-02-09 19:02:10 -0600272 skb->protocol = hsr_ethhdr->ethhdr.h_proto;
Murali Karicheri5d935182020-07-20 12:43:27 -0400273
274 return skb;
Arvid Brodinf266a682014-07-04 23:41:03 +0200275}
276
/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 *
 * Returns a fresh skb for transmission, or NULL on allocation/padding
 * failure.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		/* Hardware inserts the HSR tag; send the frame untagged */
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	/* Checksum start moves up with the header shift below */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	/* Shift the Ethernet header (and VLAN tag, if any) up by HSR_HLEN,
	 * opening a gap for the HSR tag in front of the ethertype.
	 */
	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto free skb on error and hsr_fill_tag returns NULL in
	 * that case
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
322
Murali Karicheri451d8122020-07-22 10:40:21 -0400323struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
Murali Karicherifa4dc892020-07-22 10:40:20 -0400324 struct hsr_port *port)
Arvid Brodinf266a682014-07-04 23:41:03 +0200325{
Murali Karicheri451d8122020-07-22 10:40:21 -0400326 struct sk_buff *skb;
Arvid Brodinf266a682014-07-04 23:41:03 +0200327
Murali Karicheri451d8122020-07-22 10:40:21 -0400328 if (frame->skb_prp) {
329 struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);
330
331 if (trailer) {
332 prp_set_lan_id(trailer, port);
333 } else {
334 WARN_ONCE(!trailer, "errored PRP skb");
335 return NULL;
336 }
337 return skb_clone(frame->skb_prp, GFP_ATOMIC);
George McCollisterdcf0cd12021-02-09 19:02:11 -0600338 } else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
339 return skb_clone(frame->skb_std, GFP_ATOMIC);
Arvid Brodinf266a682014-07-04 23:41:03 +0200340 }
341
Murali Karicheri451d8122020-07-22 10:40:21 -0400342 skb = skb_copy_expand(frame->skb_std, 0,
343 skb_tailroom(frame->skb_std) + HSR_HLEN,
344 GFP_ATOMIC);
345 prp_fill_rct(skb, frame, port);
346
347 return skb;
Arvid Brodinf266a682014-07-04 23:41:03 +0200348}
349
Arvid Brodinf266a682014-07-04 23:41:03 +0200350static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
351 struct hsr_node *node_src)
352{
353 bool was_multicast_frame;
354 int res;
355
356 was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
357 hsr_addr_subst_source(node_src, skb);
358 skb_pull(skb, ETH_HLEN);
359 res = netif_rx(skb);
360 if (res == NET_RX_DROP) {
361 dev->stats.rx_dropped++;
362 } else {
363 dev->stats.rx_packets++;
364 dev->stats.rx_bytes += skb->len;
365 if (was_multicast_frame)
366 dev->stats.multicast++;
367 }
368}
369
/* Queue 'skb' for transmission on 'port'.  For frames that entered through
 * the master device (egress), rewrite destination and source MAC addresses
 * for the outgoing slave first.  Returns dev_queue_xmit()'s status
 * (0 = queued successfully).
 */
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace mac
		 * address of outgoing frame with that of the outgoing slave's.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}
383
Murali Karicheri451d8122020-07-22 10:40:21 -0400384bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
385{
386 return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
387 port->type == HSR_PT_SLAVE_B) ||
388 (frame->port_rcv->type == HSR_PT_SLAVE_B &&
389 port->type == HSR_PT_SLAVE_A));
390}
391
George McCollisterdcf0cd12021-02-09 19:02:11 -0600392bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
393{
394 if (port->dev->features & NETIF_F_HW_HSR_FWD)
395 return prp_drop_frame(frame, port);
396
397 return false;
398}
399
/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's a HSR frame: through a device where it has passed before
 * - if it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert a HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;
	bool sent = false;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* If hardware duplicate generation is enabled, only send out
		 * one port.
		 */
		if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * Also for SAN, this shouldn't be done.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		/* Supervision frames addressed to us are consumed here,
		 * not delivered up the stack.
		 */
		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if frame is to be dropped. Eg. for PRP no forward
		 * between ports.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		/* Slaves get a tagged copy; the master gets an untagged one */
		if (port->type != HSR_PT_MASTER)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER) {
			hsr_deliver_master(skb, port->dev, frame->node_src);
		} else {
			/* hsr_xmit() returns 0 when the skb was queued */
			if (!hsr_xmit(skb, port, frame))
				sent = true;
		}
	}
}
477
Arvid Brodinf266a682014-07-04 23:41:03 +0200478static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
479 struct hsr_frame_info *frame)
480{
Arvid Brodinf266a682014-07-04 23:41:03 +0200481 if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
482 frame->is_local_exclusive = true;
483 skb->pkt_type = PACKET_HOST;
484 } else {
485 frame->is_local_exclusive = false;
486 }
487
Murali Karicheri56703422019-04-05 13:31:25 -0400488 if (skb->pkt_type == PACKET_HOST ||
489 skb->pkt_type == PACKET_MULTICAST ||
490 skb->pkt_type == PACKET_BROADCAST) {
Arvid Brodinf266a682014-07-04 23:41:03 +0200491 frame->is_local_dest = true;
492 } else {
493 frame->is_local_dest = false;
494 }
495}
496
Murali Karicheri451d8122020-07-22 10:40:21 -0400497static void handle_std_frame(struct sk_buff *skb,
498 struct hsr_frame_info *frame)
Arvid Brodinf266a682014-07-04 23:41:03 +0200499{
Murali Karicheri451d8122020-07-22 10:40:21 -0400500 struct hsr_port *port = frame->port_rcv;
501 struct hsr_priv *hsr = port->hsr;
Arvid Brodinf266a682014-07-04 23:41:03 +0200502 unsigned long irqflags;
503
Murali Karicheri451d8122020-07-22 10:40:21 -0400504 frame->skb_hsr = NULL;
505 frame->skb_prp = NULL;
506 frame->skb_std = skb;
507
508 if (port->type != HSR_PT_MASTER) {
509 frame->is_from_san = true;
Murali Karicherifa4dc892020-07-22 10:40:20 -0400510 } else {
Murali Karicherifa4dc892020-07-22 10:40:20 -0400511 /* Sequence nr for the master node */
512 spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
513 frame->sequence_nr = hsr->sequence_nr;
514 hsr->sequence_nr++;
515 spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
516 }
517}
518
George McCollister48b491a2021-05-24 13:50:54 -0500519int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
520 struct hsr_frame_info *frame)
Murali Karicheri451d8122020-07-22 10:40:21 -0400521{
George McCollister78be9212021-02-09 19:02:10 -0600522 struct hsr_port *port = frame->port_rcv;
523 struct hsr_priv *hsr = port->hsr;
524
525 /* HSRv0 supervisory frames double as a tag so treat them as tagged. */
526 if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
Murali Karicheri451d8122020-07-22 10:40:21 -0400527 proto == htons(ETH_P_HSR)) {
George McCollister48b491a2021-05-24 13:50:54 -0500528 /* Check if skb contains hsr_ethhdr */
529 if (skb->mac_len < sizeof(struct hsr_ethhdr))
530 return -EINVAL;
531
Murali Karicheri451d8122020-07-22 10:40:21 -0400532 /* HSR tagged frame :- Data or Supervision */
533 frame->skb_std = NULL;
534 frame->skb_prp = NULL;
535 frame->skb_hsr = skb;
536 frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
George McCollister48b491a2021-05-24 13:50:54 -0500537 return 0;
Murali Karicheri451d8122020-07-22 10:40:21 -0400538 }
539
540 /* Standard frame or PRP from master port */
541 handle_std_frame(skb, frame);
George McCollister48b491a2021-05-24 13:50:54 -0500542
543 return 0;
Murali Karicheri451d8122020-07-22 10:40:21 -0400544}
545
George McCollister48b491a2021-05-24 13:50:54 -0500546int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
547 struct hsr_frame_info *frame)
Murali Karicheri451d8122020-07-22 10:40:21 -0400548{
549 /* Supervision frame */
550 struct prp_rct *rct = skb_get_PRP_rct(skb);
551
552 if (rct &&
553 prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
554 frame->skb_hsr = NULL;
555 frame->skb_std = NULL;
556 frame->skb_prp = skb;
557 frame->sequence_nr = prp_get_skb_sequence_nr(rct);
George McCollister48b491a2021-05-24 13:50:54 -0500558 return 0;
Murali Karicheri451d8122020-07-22 10:40:21 -0400559 }
560 handle_std_frame(skb, frame);
George McCollister48b491a2021-05-24 13:50:54 -0500561
562 return 0;
Murali Karicheri451d8122020-07-22 10:40:21 -0400563}
564
/* Populate 'frame' from an incoming skb: detect supervision frames, look
 * up the source node, handle VLAN encapsulation, dispatch to the
 * protocol-specific (HSR/PRP) parser and record local-delivery flags.
 * Returns 0 on success, -EINVAL for a truncated header, or -1 when the
 * source node is unknown (and not a supervision frame) or out of memory.
 */
static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct ethhdr *ethhdr;
	__be16 proto;
	int ret;

	/* Check if skb contains ethhdr */
	if (skb->mac_len < sizeof(struct ethhdr))
		return -EINVAL;

	memset(frame, 0, sizeof(*frame));
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
				       frame->is_supervision,
				       port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		/* Classify below on the encapsulated protocol, not 8021Q */
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}

	frame->is_from_san = false;
	frame->port_rcv = port;
	/* Protocol-specific tagged/untagged parsing (hsr_/prp_fill_frame_info) */
	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
	if (ret)
		return ret;

	check_local_dest(port->hsr, skb, frame);

	return 0;
}
610
/* Must be called holding rcu read lock (because of the port parameter).
 *
 * Entry point of the frame router: parse the skb into an hsr_frame_info,
 * register it for duplicate detection, forward/deliver it on all relevant
 * ports, then release the original skb(s).  Consumes 'skb' in all cases.
 */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	/* hsr_forward_do() worked on clones; free whichever original
	 * representations were created (kfree_skb(NULL) is a no-op).
	 */
	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}