Murali Karicheri | 0e7623b | 2019-04-05 13:31:34 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 2 | /* Copyright 2011-2014 Autronica Fire and Security AS |
| 3 | * |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 4 | * Author(s): |
| 5 | * 2011-2014 Arvid Brodin, arvid.brodin@alten.se |
Murali Karicheri | 8f4c0e0 | 2020-07-22 10:40:16 -0400 | [diff] [blame^] | 6 | * |
| 7 | * Frame router for HSR and PRP. |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 8 | */ |
| 9 | |
| 10 | #include "hsr_forward.h" |
| 11 | #include <linux/types.h> |
| 12 | #include <linux/skbuff.h> |
| 13 | #include <linux/etherdevice.h> |
| 14 | #include <linux/if_vlan.h> |
| 15 | #include "hsr_main.h" |
| 16 | #include "hsr_framereg.h" |
| 17 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 18 | struct hsr_node; |
| 19 | |
/* Per-frame forwarding state, filled in by hsr_fill_frame_info() and
 * consumed by hsr_forward_do(). Exactly one of skb_std/skb_hsr is set
 * at fill time; the other may be created lazily while forwarding.
 */
struct hsr_frame_info {
	struct sk_buff *skb_std;	/* frame without HSR tag */
	struct sk_buff *skb_hsr;	/* HSR-tagged frame, if received tagged */
	struct hsr_port *port_rcv;	/* port the frame arrived on */
	struct hsr_node *node_src;	/* sending node, from the node table */
	u16 sequence_nr;		/* seq nr used for duplicate discard */
	bool is_supervision;		/* HSR supervision frame? */
	bool is_vlan;			/* carries an 802.1Q tag? */
	bool is_local_dest;		/* deliver to the local master (host)? */
	bool is_local_exclusive;	/* addressed to us only; don't forward */
};
| 31 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 32 | /* The uses I can see for these HSR supervision frames are: |
| 33 | * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type = |
| 34 | * 22") to reset any sequence_nr counters belonging to that node. Useful if |
| 35 | * the other node's counter has been reset for some reason. |
| 36 | * -- |
| 37 | * Or not - resetting the counter and bridging the frame would create a |
| 38 | * loop, unfortunately. |
| 39 | * |
| 40 | * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck |
| 41 | * frame is received from a particular node, we know something is wrong. |
| 42 | * We just register these (as with normal frames) and throw them away. |
| 43 | * |
| 44 | * 3) Allow different MAC addresses for the two slave interfaces, using the |
| 45 | * MacAddressA field. |
| 46 | */ |
| 47 | static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) |
| 48 | { |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 49 | struct ethhdr *eth_hdr; |
| 50 | struct hsr_sup_tag *hsr_sup_tag; |
| 51 | struct hsrv1_ethhdr_sp *hsr_V1_hdr; |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 52 | |
| 53 | WARN_ON_ONCE(!skb_mac_header_was_set(skb)); |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 54 | eth_hdr = (struct ethhdr *)skb_mac_header(skb); |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 55 | |
Peter Heise | ee1c279 | 2016-04-13 13:52:22 +0200 | [diff] [blame] | 56 | /* Correct addr? */ |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 57 | if (!ether_addr_equal(eth_hdr->h_dest, |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 58 | hsr->sup_multicast_addr)) |
| 59 | return false; |
| 60 | |
Peter Heise | ee1c279 | 2016-04-13 13:52:22 +0200 | [diff] [blame] | 61 | /* Correct ether type?. */ |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 62 | if (!(eth_hdr->h_proto == htons(ETH_P_PRP) || |
| 63 | eth_hdr->h_proto == htons(ETH_P_HSR))) |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 64 | return false; |
Peter Heise | ee1c279 | 2016-04-13 13:52:22 +0200 | [diff] [blame] | 65 | |
| 66 | /* Get the supervision header from correct location. */ |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 67 | if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */ |
| 68 | hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb); |
| 69 | if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP)) |
Peter Heise | ee1c279 | 2016-04-13 13:52:22 +0200 | [diff] [blame] | 70 | return false; |
| 71 | |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 72 | hsr_sup_tag = &hsr_V1_hdr->hsr_sup; |
Peter Heise | ee1c279 | 2016-04-13 13:52:22 +0200 | [diff] [blame] | 73 | } else { |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 74 | hsr_sup_tag = |
Murali Karicheri | 5fa9677 | 2019-04-05 13:31:29 -0400 | [diff] [blame] | 75 | &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup; |
Peter Heise | ee1c279 | 2016-04-13 13:52:22 +0200 | [diff] [blame] | 76 | } |
| 77 | |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 78 | if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE && |
| 79 | hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK) |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 80 | return false; |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 81 | if (hsr_sup_tag->HSR_TLV_length != 12 && |
| 82 | hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload)) |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 83 | return false; |
| 84 | |
| 85 | return true; |
| 86 | } |
| 87 | |
/* Create a copy of @skb_in with the 6-byte HSR tag removed, for delivery
 * to non-HSR devices (the master/host interface). @skb_in itself is left
 * unmodified. Returns the new skb, or NULL on allocation failure.
 */
static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
					   struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	/* Temporarily advance past the HSR tag so the copy starts after it,
	 * with HSR_HLEN less headroom; the push restores the original skb.
	 */
	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	/* Header shrank by HSR_HLEN; keep the checksum offset in sync */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	/* Copy the original addresses (and VLAN tag, if any) over the start
	 * of the copy, so the result is the frame minus the HSR tag.
	 */
	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}
| 116 | |
| 117 | static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame, |
| 118 | struct hsr_port *port) |
| 119 | { |
| 120 | if (!frame->skb_std) |
| 121 | frame->skb_std = create_stripped_skb(frame->skb_hsr, frame); |
| 122 | return skb_clone(frame->skb_std, GFP_ATOMIC); |
| 123 | } |
| 124 | |
Murali Karicheri | 5d93518 | 2020-07-20 12:43:27 -0400 | [diff] [blame] | 125 | static struct sk_buff *hsr_fill_tag(struct sk_buff *skb, |
| 126 | struct hsr_frame_info *frame, |
| 127 | struct hsr_port *port, u8 proto_version) |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 128 | { |
| 129 | struct hsr_ethhdr *hsr_ethhdr; |
| 130 | int lane_id; |
| 131 | int lsdu_size; |
| 132 | |
Murali Karicheri | 6d6148bc | 2020-07-17 10:55:09 -0400 | [diff] [blame] | 133 | /* pad to minimum packet size which is 60 + 6 (HSR tag) */ |
Murali Karicheri | 5d93518 | 2020-07-20 12:43:27 -0400 | [diff] [blame] | 134 | if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) |
| 135 | return NULL; |
Murali Karicheri | 6d6148bc | 2020-07-17 10:55:09 -0400 | [diff] [blame] | 136 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 137 | if (port->type == HSR_PT_SLAVE_A) |
| 138 | lane_id = 0; |
| 139 | else |
| 140 | lane_id = 1; |
| 141 | |
| 142 | lsdu_size = skb->len - 14; |
| 143 | if (frame->is_vlan) |
| 144 | lsdu_size -= 4; |
| 145 | |
Murali Karicheri | 5fa9677 | 2019-04-05 13:31:29 -0400 | [diff] [blame] | 146 | hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb); |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 147 | |
| 148 | set_hsr_tag_path(&hsr_ethhdr->hsr_tag, lane_id); |
| 149 | set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size); |
| 150 | hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr); |
| 151 | hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto; |
Murali Karicheri | b1b4aa9 | 2019-04-05 13:31:32 -0400 | [diff] [blame] | 152 | hsr_ethhdr->ethhdr.h_proto = htons(proto_version ? |
Peter Heise | ee1c279 | 2016-04-13 13:52:22 +0200 | [diff] [blame] | 153 | ETH_P_HSR : ETH_P_PRP); |
Murali Karicheri | 5d93518 | 2020-07-20 12:43:27 -0400 | [diff] [blame] | 154 | |
| 155 | return skb; |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 156 | } |
| 157 | |
/* Create a copy of @skb_o with a 6-byte HSR tag inserted after the
 * Ethernet header (and after the VLAN tag, if present). @skb_o is never
 * consumed. Returns the tagged skb, or NULL on allocation/padding failure.
 */
static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
					 struct hsr_frame_info *frame,
					 struct hsr_port *port)
{
	int movelen;
	unsigned char *dst, *src;
	struct sk_buff *skb;

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	/* Data moves HSR_HLEN bytes toward the head; keep csum_start valid */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	/* Shift the Ethernet header (and VLAN tag, if any) forward to open
	 * an HSR_HLEN gap where hsr_fill_tag() will write the tag.
	 */
	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* skb_put_padto free skb on error and hsr_fill_tag returns NULL in
	 * that case
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
| 189 | |
| 190 | /* If the original frame was an HSR tagged frame, just clone it to be sent |
| 191 | * unchanged. Otherwise, create a private frame especially tagged for 'port'. |
| 192 | */ |
| 193 | static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame, |
| 194 | struct hsr_port *port) |
| 195 | { |
| 196 | if (frame->skb_hsr) |
| 197 | return skb_clone(frame->skb_hsr, GFP_ATOMIC); |
| 198 | |
Murali Karicheri | 5670342 | 2019-04-05 13:31:25 -0400 | [diff] [blame] | 199 | if (port->type != HSR_PT_SLAVE_A && port->type != HSR_PT_SLAVE_B) { |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 200 | WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port"); |
| 201 | return NULL; |
| 202 | } |
| 203 | |
| 204 | return create_tagged_skb(frame->skb_std, frame, port); |
| 205 | } |
| 206 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 207 | static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev, |
| 208 | struct hsr_node *node_src) |
| 209 | { |
| 210 | bool was_multicast_frame; |
| 211 | int res; |
| 212 | |
| 213 | was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST); |
| 214 | hsr_addr_subst_source(node_src, skb); |
| 215 | skb_pull(skb, ETH_HLEN); |
| 216 | res = netif_rx(skb); |
| 217 | if (res == NET_RX_DROP) { |
| 218 | dev->stats.rx_dropped++; |
| 219 | } else { |
| 220 | dev->stats.rx_packets++; |
| 221 | dev->stats.rx_bytes += skb->len; |
| 222 | if (was_multicast_frame) |
| 223 | dev->stats.multicast++; |
| 224 | } |
| 225 | } |
| 226 | |
| 227 | static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port, |
| 228 | struct hsr_frame_info *frame) |
| 229 | { |
| 230 | if (frame->port_rcv->type == HSR_PT_MASTER) { |
| 231 | hsr_addr_subst_dest(frame->node_src, skb, port); |
| 232 | |
| 233 | /* Address substitution (IEC62439-3 pp 26, 50): replace mac |
| 234 | * address of outgoing frame with that of the outgoing slave's. |
| 235 | */ |
| 236 | ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr); |
| 237 | } |
| 238 | return dev_queue_xmit(skb); |
| 239 | } |
| 240 | |
/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's a HSR frame: through a device where it has passed before
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert a HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* Don't send frame over port where it has been sent before
		 * (duplicate discard, keyed on source node + sequence nr)
		 */
		if (hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		/* Supervision frames are processed on the master port but
		 * never delivered to the host stack
		 */
		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame->skb_hsr,
					     frame->node_src,
					     frame->port_rcv);
			continue;
		}

		/* Slave ports get a tagged copy, the master an untagged one */
		if (port->type != HSR_PT_MASTER)
			skb = frame_get_tagged_skb(frame, port);
		else
			skb = frame_get_stripped_skb(frame, port);
		if (!skb) {
			/* FIXME: Record the dropped frame? */
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER)
			hsr_deliver_master(skb, port->dev, frame->node_src);
		else
			hsr_xmit(skb, port, frame);
	}
}
| 298 | |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 299 | static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb, |
| 300 | struct hsr_frame_info *frame) |
| 301 | { |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 302 | if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) { |
| 303 | frame->is_local_exclusive = true; |
| 304 | skb->pkt_type = PACKET_HOST; |
| 305 | } else { |
| 306 | frame->is_local_exclusive = false; |
| 307 | } |
| 308 | |
Murali Karicheri | 5670342 | 2019-04-05 13:31:25 -0400 | [diff] [blame] | 309 | if (skb->pkt_type == PACKET_HOST || |
| 310 | skb->pkt_type == PACKET_MULTICAST || |
| 311 | skb->pkt_type == PACKET_BROADCAST) { |
Arvid Brodin | f266a68 | 2014-07-04 23:41:03 +0200 | [diff] [blame] | 312 | frame->is_local_dest = true; |
| 313 | } else { |
| 314 | frame->is_local_dest = false; |
| 315 | } |
| 316 | } |
| 317 | |
/* Populate @frame from the received @skb and @port.
 *
 * Looks up (or registers) the source node, detects supervision/VLAN/HSR
 * framing, and assigns the sequence number: taken from the HSR tag for
 * tagged frames, otherwise freshly allocated from the master's counter
 * (locally originated frame).
 *
 * Returns 0 on success, -1 if no source node could be found or created.
 */
static int hsr_fill_frame_info(struct hsr_frame_info *frame,
			       struct sk_buff *skb, struct hsr_port *port)
{
	struct ethhdr *ethhdr;
	unsigned long irqflags;

	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
		frame->is_vlan = true;
		/* FIXME: */
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}
	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		/* Tagged frame: reuse the sequence nr from the HSR tag */
		frame->skb_std = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
	} else {
		/* Sequence nr for the master node */
		frame->skb_std = skb;
		frame->skb_hsr = NULL;
		spin_lock_irqsave(&port->hsr->seqnr_lock, irqflags);
		frame->sequence_nr = port->hsr->sequence_nr;
		port->hsr->sequence_nr++;
		spin_unlock_irqrestore(&port->hsr->seqnr_lock, irqflags);
	}

	frame->port_rcv = port;
	check_local_dest(port->hsr, skb, frame);

	return 0;
}
| 356 | |
/* Entry point for both ingress frames (from a slave) and egress frames
 * (from the master port). Registers the frame in the node table, forwards
 * it to the appropriate ports, and accounts tx stats for the master.
 *
 * Must be called holding rcu read lock (because of the port parameter).
 * Consumes @skb in all paths.
 */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	/* Forwarding assumes the mac header starts at skb->data */
	if (skb_mac_header(skb) != skb->data) {
		WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
			  __FILE__, __LINE__, port->dev->name);
		goto out_drop;
	}

	if (hsr_fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;
	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);
	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	/* @skb is owned by frame as either skb_hsr or skb_std; the other
	 * pointer is NULL or a lazily created copy (kfree_skb(NULL) is ok).
	 */
	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}