// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame router for HSR and PRP.
 */

#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

struct hsr_node;

/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		hsr_sup_tag =
			&((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	if (hsr_sup_tag->HSR_TLV_length != 12 &&
	    hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	return true;
}

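/* Create a copy of skb_in with the HSR tag (HSR_HLEN bytes) removed: the
 * MAC addresses (and the VLAN header, if present) are copied back in front
 * of the encapsulated protocol and payload, and skb->protocol is set from
 * the restored Ethernet header.
 */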
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}

struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_hsr) {
			frame->skb_std =
				create_stripped_skb_hsr(frame->skb_hsr, frame);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_prp) {
			/* trim the skb by len - HSR_HLEN to exclude RCT */
			skb_trim(frame->skb_prp,
				 frame->skb_prp->len - HSR_HLEN);
			frame->skb_std =
				__pskb_copy(frame->skb_prp,
					    skb_headroom(frame->skb_prp),
					    GFP_ATOMIC);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

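/* Fill in the LAN identifier for the RCT: 0 for frames sent on slave A,
 * 1 for frames sent on slave B, with the device's net_id OR'ed into the
 * upper bits (see set_prp_lan_id()).
 */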
static void prp_set_lan_id(struct prp_rct *trailer,
			   struct hsr_port *port)
{
	int lane_id;

	if (port->type == HSR_PT_SLAVE_A)
		lane_id = 0;
	else
		lane_id = 1;

	/* Add net_id in the upper 3 bits of lane_id */
	lane_id |= port->hsr->net_id;
	set_prp_lan_id(trailer, lane_id);
}

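/* The PRP Redundancy Control Trailer (RCT) appended below carries the
 * frame's sequence number, the LAN id together with the LSDU size, and the
 * PRP suffix (ETH_P_PRP) that identifies the trailer.
 */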
/* Tailroom for PRP rct should have been created before calling this */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port)
{
	struct prp_rct *trailer;
	int min_size = ETH_ZLEN;
	int lsdu_size;

	if (!skb)
		return skb;

	if (frame->is_vlan)
		min_size = VLAN_ETH_ZLEN;

	if (skb_put_padto(skb, min_size))
		return NULL;

	trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;
	prp_set_lan_id(trailer, port);
	set_prp_LSDU_size(trailer, lsdu_size);
	trailer->sequence_nr = htons(frame->sequence_nr);
	trailer->PRP_suffix = htons(ETH_P_PRP);

	return skb;
}

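/* Set the path id in the HSR tag: 0 for frames sent on slave A, 1 for
 * frames sent on slave B.
 */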
static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
			    struct hsr_port *port)
{
	int path_id;

	if (port->type == HSR_PT_SLAVE_A)
		path_id = 0;
	else
		path_id = 1;

	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
}

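/* Fill in the HSR tag of a frame whose header has already been moved to
 * make room for it (see hsr_create_tagged_frame()): pad to the minimum
 * frame size, set path id, LSDU size and sequence number, move the
 * original ethertype into encap_proto and set h_proto to ETH_P_HSR (v1)
 * or ETH_P_PRP (v0). Returns NULL if padding fails; the skb has then
 * already been freed by skb_put_padto().
 */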
static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port, u8 proto_version)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lsdu_size;

	/* pad to minimum packet size which is 60 + 6 (HSR tag) */
	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return NULL;

	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;

	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);

	hsr_set_path_id(hsr_ethhdr, port);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
			ETH_P_HSR : ETH_P_PRP);

	return skb;
}

/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto() frees the skb on error, in which case hsr_fill_tag()
	 * returns NULL.
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}

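/* If the original frame already carries a PRP RCT, re-stamp the LAN id for
 * 'port' and clone it. Otherwise copy the standard frame with enough
 * tailroom and append an RCT for 'port'.
 */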
struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	struct sk_buff *skb;

	if (frame->skb_prp) {
		struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);

		if (trailer) {
			prp_set_lan_id(trailer, port);
		} else {
			WARN_ONCE(!trailer, "errored PRP skb");
			return NULL;
		}
		return skb_clone(frame->skb_prp, GFP_ATOMIC);
	}

	skb = skb_copy_expand(frame->skb_std, 0,
			      skb_tailroom(frame->skb_std) + HSR_HLEN,
			      GFP_ATOMIC);
	prp_fill_rct(skb, frame, port);

	return skb;
}

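/* Deliver a frame to the upper layers via the master (hsr) device: apply
 * source-address substitution (hsr_addr_subst_source()), strip the
 * Ethernet header and hand the skb to netif_rx(), updating the master
 * device's rx statistics.
 */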
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	int res;

	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}

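/* Queue the frame for transmission on the slave device behind 'port'. For
 * frames that entered through the master port (i.e. locally originated),
 * destination and source address substitution is applied first.
 */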
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC62439-3 pp 26, 50): replace the
		 * MAC address of the outgoing frame with that of the
		 * outgoing slave.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}

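/* In PRP the two slave ports are attached to independent LANs, so a frame
 * received on one slave must never be forwarded out of the other slave.
 */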
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
		 port->type == HSR_PT_SLAVE_B) ||
		(frame->port_rcv->type == HSR_PT_SLAVE_B &&
		 port->type == HSR_PT_SLAVE_A));
}

/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's an HSR frame: through a device where it has passed before
 * - If it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert an HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * This check is not done for frames from a SAN.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if the frame is to be dropped on this port, e.g. for
		 * PRP there is no forwarding between the slave ports.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		if (port->type != HSR_PT_MASTER)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER)
			hsr_deliver_master(skb, port->dev, frame->node_src);
		else
			hsr_xmit(skb, port, frame);
	}
}

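/* Decide whether the frame should be delivered locally: is_local_exclusive
 * is set for frames addressed directly to this node, is_local_dest for
 * those plus multicast and broadcast frames.
 */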
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
		frame->is_local_exclusive = true;
		skb->pkt_type = PACKET_HOST;
	} else {
		frame->is_local_exclusive = false;
	}

	if (skb->pkt_type == PACKET_HOST ||
	    skb->pkt_type == PACKET_MULTICAST ||
	    skb->pkt_type == PACKET_BROADCAST) {
		frame->is_local_dest = true;
	} else {
		frame->is_local_dest = false;
	}
}

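/* Set up frame info for a frame without an HSR tag or PRP RCT: either a
 * frame received untagged on a slave port (treated as coming from a SAN),
 * or a locally generated frame from the master port, which is assigned the
 * next master sequence number.
 */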
static void handle_std_frame(struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;
	unsigned long irqflags;

	frame->skb_hsr = NULL;
	frame->skb_prp = NULL;
	frame->skb_std = skb;

	if (port->type != HSR_PT_MASTER) {
		frame->is_from_san = true;
	} else {
		/* Sequence nr for the master node */
		spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
		frame->sequence_nr = hsr->sequence_nr;
		hsr->sequence_nr++;
		spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
	}
}

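/* HSR-specific frame classification: an HSR-tagged frame (data or
 * supervision) is stored as skb_hsr and its sequence number is taken from
 * the tag; anything else is handled as a standard frame.
 */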
void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
			 struct hsr_frame_info *frame)
{
	if (proto == htons(ETH_P_PRP) ||
	    proto == htons(ETH_P_HSR)) {
		/* HSR tagged frame: data or supervision */
		frame->skb_std = NULL;
		frame->skb_prp = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		return;
	}

	/* Standard frame or PRP from master port */
	handle_std_frame(skb, frame);
}

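/* PRP-specific frame classification: a frame carrying a valid RCT is
 * stored as skb_prp and its sequence number is taken from the RCT;
 * anything else is handled as a standard frame.
 */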
void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
			 struct hsr_frame_info *frame)
{
	/* Supervision frame */
	struct prp_rct *rct = skb_get_PRP_rct(skb);

	if (rct &&
	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
		frame->skb_hsr = NULL;
		frame->skb_std = NULL;
		frame->skb_prp = skb;
		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
		return;
	}
	handle_std_frame(skb, frame);
}

static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct ethhdr *ethhdr;
	__be16 proto;

	memset(frame, 0, sizeof(*frame));
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
				       frame->is_supervision,
				       port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}

	frame->is_from_san = false;
	frame->port_rcv = port;
	hsr->proto_ops->fill_frame_info(proto, skb, frame);
	check_local_dest(port->hsr, skb, frame);

	return 0;
}

/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (skb_mac_header(skb) != skb->data) {
		WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
			  __FILE__, __LINE__, port->dev->name);
		goto out_drop;
	}

	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* Gets called for ingress frames as well as for egress from the
	 * master port, so check and increment tx stats for the master port
	 * only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}