Murali Karicheri | 0e7623b | 2019-04-05 13:31:34 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 2 | /* Copyright 2011-2014 Autronica Fire and Security AS |
| 3 | * |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 4 | * Author(s): |
| 5 | * 2011-2014 Arvid Brodin, arvid.brodin@alten.se |
Murali Karicheri | 8f4c0e0 | 2020-07-22 10:40:16 -0400 | [diff] [blame] | 6 | * |
| 7 | * Frame router for HSR and PRP. |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 8 | */ |
| 9 | |
| 10 | #include "hsr_forward.h" |
| 11 | #include <linux/types.h> |
| 12 | #include <linux/skbuff.h> |
| 13 | #include <linux/etherdevice.h> |
| 14 | #include <linux/if_vlan.h> |
| 15 | #include "hsr_main.h" |
| 16 | #include "hsr_framereg.h" |
| 17 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 18 | struct hsr_node; |
| 19 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 20 | /* The uses I can see for these HSR supervision frames are: |
| 21 | * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type = |
| 22 | * 22") to reset any sequence_nr counters belonging to that node. Useful if |
| 23 | * the other node's counter has been reset for some reason. |
| 24 | * -- |
| 25 | * Or not - resetting the counter and bridging the frame would create a |
| 26 | * loop, unfortunately. |
| 27 | * |
| 28 | * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck |
| 29 | * frame is received from a particular node, we know something is wrong. |
| 30 | * We just register these (as with normal frames) and throw them away. |
| 31 | * |
| 32 | * 3) Allow different MAC addresses for the two slave interfaces, using the |
| 33 | * MacAddressA field. |
| 34 | */ |
/* Return true iff @skb is an HSR/PRP supervision frame addressed to this
 * ring's supervision multicast address, with a structurally valid TLV chain.
 * On return skb->data is restored to where it was on entry (every skb_pull()
 * below is undone by a matching skb_push()).
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;
	struct hsr_sup_tlv *hsr_sup_tlv;
	u16 total_length = 0;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		/* HSRv1: supervision data sits behind the HSR tag */
		total_length = sizeof(struct hsrv1_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		/* HSRv0/PRP: supervision data follows the Ethernet header */
		total_length = sizeof(struct hsrv0_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_sup_tag =
		     &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	/* First TLV must be one of the known supervision types */
	if (hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	/* Accepted payload lengths: 12, or the size of a MAC-address
	 * payload (struct hsr_sup_payload). NOTE(review): the meaning of
	 * the literal 12 is not evident from this file — confirm against
	 * IEC 62439-3 before changing it.
	 */
	if (hsr_sup_tag->tlv.HSR_TLV_length != 12 &&
	    hsr_sup_tag->tlv.HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	/* Get next tlv: temporarily advance skb->data to peek at it, then
	 * restore with skb_push() so the caller sees an unmodified skb.
	 */
	total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
	if (!pskb_may_pull(skb, total_length))
		return false;
	skb_pull(skb, total_length);
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	skb_push(skb, total_length);

	/* if this is a redbox supervision frame we need to verify
	 * that more data is available
	 */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* tlv length must be a length of a mac address */
		if (hsr_sup_tlv->HSR_TLV_length != sizeof(struct hsr_sup_payload))
			return false;

		/* make sure another tlv follows */
		total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tlv->HSR_TLV_length;
		if (!pskb_may_pull(skb, total_length))
			return false;

		/* get next tlv (same peek-and-restore pattern as above) */
		skb_pull(skb, total_length);
		hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
		skb_push(skb, total_length);
	}

	/* end of tlvs must follow at the end */
	if (hsr_sup_tlv->HSR_TLV_type == HSR_TLV_EOT &&
	    hsr_sup_tlv->HSR_TLV_length != 0)
		return false;

	return true;
}
| 119 | |
/* Create a copy of @skb_in with the HSR tag removed, i.e. the frame as it
 * looked before tagging. Returns the new skb, or NULL on allocation failure.
 * @skb_in itself is left unmodified (the skb_pull below is undone).
 */
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	/* Temporarily skip the HSR tag so __pskb_copy() copies the frame
	 * without it, then restore skb_in's data pointer.
	 */
	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	/* The checksum start offset was computed for the tagged frame */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	/* Re-copy the MAC addresses (and VLAN header, if present) from the
	 * original, since the copy started HSR_HLEN bytes into the header.
	 */
	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}
| 148 | |
Murali Karicheri | fa4dc89 | 2020-07-22 10:40:20 -0400 | [diff] [blame] | 149 | struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame, |
| 150 | struct hsr_port *port) |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 151 | { |
Murali Karicheri | 451d812 | 2020-07-22 10:40:21 -0400 | [diff] [blame] | 152 | if (!frame->skb_std) { |
| 153 | if (frame->skb_hsr) { |
| 154 | frame->skb_std = |
| 155 | create_stripped_skb_hsr(frame->skb_hsr, frame); |
| 156 | } else { |
| 157 | /* Unexpected */ |
| 158 | WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n", |
| 159 | __FILE__, __LINE__, port->dev->name); |
| 160 | return NULL; |
| 161 | } |
| 162 | } |
| 163 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 164 | return skb_clone(frame->skb_std, GFP_ATOMIC); |
| 165 | } |
| 166 | |
/* Hand out a clone of the untagged representation of @frame, lazily
 * creating it on first use by cutting the PRP RCT off the received skb.
 * Note: skb_trim() permanently removes the trailer from frame->skb_prp.
 */
struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_prp) {
			/* trim the skb by len - HSR_HLEN to exclude RCT */
			skb_trim(frame->skb_prp,
				 frame->skb_prp->len - HSR_HLEN);
			frame->skb_std =
				__pskb_copy(frame->skb_prp,
					    skb_headroom(frame->skb_prp),
					    GFP_ATOMIC);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}
| 189 | |
| 190 | static void prp_set_lan_id(struct prp_rct *trailer, |
| 191 | struct hsr_port *port) |
| 192 | { |
| 193 | int lane_id; |
| 194 | |
| 195 | if (port->type == HSR_PT_SLAVE_A) |
| 196 | lane_id = 0; |
| 197 | else |
| 198 | lane_id = 1; |
| 199 | |
| 200 | /* Add net_id in the upper 3 bits of lane_id */ |
| 201 | lane_id |= port->hsr->net_id; |
| 202 | set_prp_lan_id(trailer, lane_id); |
| 203 | } |
| 204 | |
| 205 | /* Tailroom for PRP rct should have been created before calling this */ |
| 206 | static struct sk_buff *prp_fill_rct(struct sk_buff *skb, |
| 207 | struct hsr_frame_info *frame, |
| 208 | struct hsr_port *port) |
| 209 | { |
| 210 | struct prp_rct *trailer; |
| 211 | int min_size = ETH_ZLEN; |
| 212 | int lsdu_size; |
| 213 | |
| 214 | if (!skb) |
| 215 | return skb; |
| 216 | |
| 217 | if (frame->is_vlan) |
| 218 | min_size = VLAN_ETH_ZLEN; |
| 219 | |
| 220 | if (skb_put_padto(skb, min_size)) |
| 221 | return NULL; |
| 222 | |
| 223 | trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN); |
| 224 | lsdu_size = skb->len - 14; |
| 225 | if (frame->is_vlan) |
| 226 | lsdu_size -= 4; |
| 227 | prp_set_lan_id(trailer, port); |
| 228 | set_prp_LSDU_size(trailer, lsdu_size); |
| 229 | trailer->sequence_nr = htons(frame->sequence_nr); |
| 230 | trailer->PRP_suffix = htons(ETH_P_PRP); |
George McCollister | 78be921 | 2021-02-09 19:02:10 -0600 | [diff] [blame] | 231 | skb->protocol = eth_hdr(skb)->h_proto; |
Murali Karicheri | 451d812 | 2020-07-22 10:40:21 -0400 | [diff] [blame] | 232 | |
| 233 | return skb; |
| 234 | } |
| 235 | |
| 236 | static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr, |
| 237 | struct hsr_port *port) |
| 238 | { |
| 239 | int path_id; |
| 240 | |
| 241 | if (port->type == HSR_PT_SLAVE_A) |
| 242 | path_id = 0; |
| 243 | else |
| 244 | path_id = 1; |
| 245 | |
| 246 | set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id); |
| 247 | } |
| 248 | |
Murali Karicheri | 5d93518 | 2020-07-20 12:43:27 -0400 | [diff] [blame] | 249 | static struct sk_buff *hsr_fill_tag(struct sk_buff *skb, |
| 250 | struct hsr_frame_info *frame, |
| 251 | struct hsr_port *port, u8 proto_version) |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 252 | { |
| 253 | struct hsr_ethhdr *hsr_ethhdr; |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 254 | int lsdu_size; |
| 255 | |
Murali Karicheri | 6d6148bc | 2020-07-17 10:55:09 -0400 | [diff] [blame] | 256 | /* pad to minimum packet size which is 60 + 6 (HSR tag) */ |
Murali Karicheri | 5d93518 | 2020-07-20 12:43:27 -0400 | [diff] [blame] | 257 | if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) |
| 258 | return NULL; |
Murali Karicheri | 6d6148bc | 2020-07-17 10:55:09 -0400 | [diff] [blame] | 259 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 260 | lsdu_size = skb->len - 14; |
| 261 | if (frame->is_vlan) |
| 262 | lsdu_size -= 4; |
| 263 | |
Murali Karicheri | 5fa9677 | 2019-04-05 13:31:29 -0400 | [diff] [blame] | 264 | hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb); |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 265 | |
Murali Karicheri | 451d812 | 2020-07-22 10:40:21 -0400 | [diff] [blame] | 266 | hsr_set_path_id(hsr_ethhdr, port); |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 267 | set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size); |
| 268 | hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr); |
| 269 | hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto; |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 270 | hsr_ethhdr->ethhdr.h_proto = htons(proto_version ? |
Peter Heise | ee1c279 | 2016-04-13 13:52:22 +0200 | [diff] [blame] | 271 | ETH_P_HSR : ETH_P_PRP); |
George McCollister | 78be921 | 2021-02-09 19:02:10 -0600 | [diff] [blame] | 272 | skb->protocol = hsr_ethhdr->ethhdr.h_proto; |
Murali Karicheri | 5d93518 | 2020-07-20 12:43:27 -0400 | [diff] [blame] | 273 | |
| 274 | return skb; |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 275 | } |
| 276 | |
/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 * Returns NULL on allocation/padding failure.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		/* Hardware inserts the tag itself; send the frame untagged */
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	/* Payload offset grows by the tag we are about to insert */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	/* Shift the Ethernet (and VLAN) header up by HSR_HLEN bytes to open
	 * a gap for the tag directly after it.
	 */
	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto free skb on error and hsr_fill_tag returns NULL in
	 * that case
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
| 322 | |
Murali Karicheri | 451d812 | 2020-07-22 10:40:21 -0400 | [diff] [blame] | 323 | struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame, |
Murali Karicheri | fa4dc89 | 2020-07-22 10:40:20 -0400 | [diff] [blame] | 324 | struct hsr_port *port) |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 325 | { |
Murali Karicheri | 451d812 | 2020-07-22 10:40:21 -0400 | [diff] [blame] | 326 | struct sk_buff *skb; |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 327 | |
Murali Karicheri | 451d812 | 2020-07-22 10:40:21 -0400 | [diff] [blame] | 328 | if (frame->skb_prp) { |
| 329 | struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp); |
| 330 | |
| 331 | if (trailer) { |
| 332 | prp_set_lan_id(trailer, port); |
| 333 | } else { |
| 334 | WARN_ONCE(!trailer, "errored PRP skb"); |
| 335 | return NULL; |
| 336 | } |
| 337 | return skb_clone(frame->skb_prp, GFP_ATOMIC); |
George McCollister | dcf0cd1 | 2021-02-09 19:02:11 -0600 | [diff] [blame] | 338 | } else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) { |
| 339 | return skb_clone(frame->skb_std, GFP_ATOMIC); |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 340 | } |
| 341 | |
Murali Karicheri | 451d812 | 2020-07-22 10:40:21 -0400 | [diff] [blame] | 342 | skb = skb_copy_expand(frame->skb_std, 0, |
| 343 | skb_tailroom(frame->skb_std) + HSR_HLEN, |
| 344 | GFP_ATOMIC); |
| 345 | prp_fill_rct(skb, frame, port); |
| 346 | |
| 347 | return skb; |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 348 | } |
| 349 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 350 | static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev, |
| 351 | struct hsr_node *node_src) |
| 352 | { |
| 353 | bool was_multicast_frame; |
| 354 | int res; |
| 355 | |
| 356 | was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST); |
| 357 | hsr_addr_subst_source(node_src, skb); |
| 358 | skb_pull(skb, ETH_HLEN); |
| 359 | res = netif_rx(skb); |
| 360 | if (res == NET_RX_DROP) { |
| 361 | dev->stats.rx_dropped++; |
| 362 | } else { |
| 363 | dev->stats.rx_packets++; |
| 364 | dev->stats.rx_bytes += skb->len; |
| 365 | if (was_multicast_frame) |
| 366 | dev->stats.multicast++; |
| 367 | } |
| 368 | } |
| 369 | |
/* Transmit @skb on slave @port. For locally originated frames (received on
 * the master port), rewrite the addresses per the standard first. Returns
 * the dev_queue_xmit() status.
 */
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace mac
		 * address of outgoing frame with that of the outgoing slave's.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}
| 383 | |
Murali Karicheri | 451d812 | 2020-07-22 10:40:21 -0400 | [diff] [blame] | 384 | bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port) |
| 385 | { |
| 386 | return ((frame->port_rcv->type == HSR_PT_SLAVE_A && |
| 387 | port->type == HSR_PT_SLAVE_B) || |
| 388 | (frame->port_rcv->type == HSR_PT_SLAVE_B && |
| 389 | port->type == HSR_PT_SLAVE_A)); |
| 390 | } |
| 391 | |
George McCollister | dcf0cd1 | 2021-02-09 19:02:11 -0600 | [diff] [blame] | 392 | bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port) |
| 393 | { |
| 394 | if (port->dev->features & NETIF_F_HW_HSR_FWD) |
| 395 | return prp_drop_frame(frame, port); |
| 396 | |
| 397 | return false; |
| 398 | } |
| 399 | |
/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's a HSR frame: through a device where it has passed before
 * - if it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert a HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;
	bool sent = false;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* If hardware duplicate generation is enabled, only send out
		 * one port.
		 */
		if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * Also for SAN, this shouldn't be done.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		/* Supervision frames addressed to the master are consumed
		 * by the node table, not delivered up the stack.
		 */
		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if frame is to be dropped. Eg. for PRP no forward
		 * between ports.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		/* Each iteration gets its own clone/copy; the frame's own
		 * skbs remain owned by the caller.
		 */
		if (port->type != HSR_PT_MASTER)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER) {
			hsr_deliver_master(skb, port->dev, frame->node_src);
		} else {
			if (!hsr_xmit(skb, port, frame))
				sent = true;
		}
	}
}
| 477 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 478 | static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb, |
| 479 | struct hsr_frame_info *frame) |
| 480 | { |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 481 | if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) { |
| 482 | frame->is_local_exclusive = true; |
| 483 | skb->pkt_type = PACKET_HOST; |
| 484 | } else { |
| 485 | frame->is_local_exclusive = false; |
| 486 | } |
| 487 | |
Murali Karicheri | 5670342 | 2019-04-05 13:31:25 -0400 | [diff] [blame] | 488 | if (skb->pkt_type == PACKET_HOST || |
| 489 | skb->pkt_type == PACKET_MULTICAST || |
| 490 | skb->pkt_type == PACKET_BROADCAST) { |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 491 | frame->is_local_dest = true; |
| 492 | } else { |
| 493 | frame->is_local_dest = false; |
| 494 | } |
| 495 | } |
| 496 | |
/* Record @skb in @frame as an untagged (standard) frame. For frames
 * originating locally (master port), also assign the next sequence number
 * under the seqnr lock; frames from a slave port are flagged as coming
 * from a SAN (singly attached node).
 */
static void handle_std_frame(struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;
	unsigned long irqflags;

	frame->skb_hsr = NULL;
	frame->skb_prp = NULL;
	frame->skb_std = skb;

	if (port->type != HSR_PT_MASTER) {
		frame->is_from_san = true;
	} else {
		/* Sequence nr for the master node */
		spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
		frame->sequence_nr = hsr->sequence_nr;
		hsr->sequence_nr++;
		spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
	}
}
| 518 | |
/* HSR proto_ops->fill_frame_info: classify @skb as HSR-tagged or standard
 * and fill @frame accordingly. Returns 0 on success, -EINVAL when a
 * supposedly tagged frame is too short to hold an hsr_ethhdr.
 */
int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;

	/* HSRv0 supervisory frames double as a tag so treat them as tagged. */
	if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
	    proto == htons(ETH_P_HSR)) {
		/* Check if skb contains hsr_ethhdr */
		if (skb->mac_len < sizeof(struct hsr_ethhdr))
			return -EINVAL;

		/* HSR tagged frame :- Data or Supervision */
		frame->skb_std = NULL;
		frame->skb_prp = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		return 0;
	}

	/* Standard frame or PRP from master port */
	handle_std_frame(skb, frame);

	return 0;
}
| 545 | |
/* PRP proto_ops->fill_frame_info: classify @skb as PRP-tagged (valid RCT
 * with matching LSDU size) or standard, and fill @frame accordingly.
 * Always returns 0.
 */
int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	/* Supervision frame */
	struct prp_rct *rct = skb_get_PRP_rct(skb);

	if (rct &&
	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
		frame->skb_hsr = NULL;
		frame->skb_std = NULL;
		frame->skb_prp = skb;
		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
		return 0;
	}
	/* No (valid) RCT: treat as an untagged standard frame */
	handle_std_frame(skb, frame);

	return 0;
}
| 564 | |
/* Populate @frame from the received @skb: supervision status, source node,
 * VLAN presence, protocol-specific tagging info and local-destination
 * flags. Returns 0 on success, -EINVAL on malformed headers, -1 when the
 * source node is unknown/unallocatable.
 */
static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct ethhdr *ethhdr;
	__be16 proto;
	int ret;

	/* Check if skb contains ethhdr */
	if (skb->mac_len < sizeof(struct ethhdr))
		return -EINVAL;

	memset(frame, 0, sizeof(*frame));
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
				       frame->is_supervision,
				       port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		/* Look through the VLAN header to the encapsulated proto */
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}

	frame->is_from_san = false;
	frame->port_rcv = port;
	/* Protocol-specific (HSR vs PRP) tagging classification */
	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
	if (ret)
		return ret;

	check_local_dest(port->hsr, skb, frame);

	return 0;
}
| 610 | |
/* Must be called holding rcu read lock (because of the port parameter).
 * Entry point for forwarding: classifies @skb, registers the source node,
 * forwards clones to all appropriate ports, then releases the original.
 * Takes ownership of @skb on all paths.
 */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	/* Exactly one of these aliases the original skb (the others are
	 * NULL); hsr_forward_do() only transmitted clones/copies.
	 */
	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}