// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * The HSR spec says never to forward the same frame twice on the same
 * interface. A frame is identified by its source MAC address and its HSR
 * sequence number. This code keeps track of senders and their sequence numbers
 * to allow filtering of duplicate frames, and to detect HSR ring errors.
 * Same code handles filtering of duplicates for PRP as well.
 */

#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include "hsr_main.h"
#include "hsr_framereg.h"
#include "hsr_netlink.h"

/* TODO: use hash lists for mac addresses (linux/jhash.h)? */

/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
 * false otherwise.
 */
static bool seq_nr_after(u16 a, u16 b)
{
	/* Remove inconsistency where
	 * seq_nr_after(a, b) == seq_nr_before(a, b)
	 */
	if ((int)b - a == 32768)
		return false;

	return (((s16)(b - a)) < 0);
}
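
/* Worked example of the wrap-around comparison above: seq_nr_after(1, 65535)
 * is true, since (s16)(65535 - 1) == -2 < 0, i.e. 1 is "after" 65535 once the
 * 16-bit sequence number has wrapped. For the ambiguous distance of exactly
 * 32768 (e.g. a == 32768, b == 0), only one direction is treated as "after",
 * so seq_nr_after() and seq_nr_before() can never both be true for the same
 * pair.
 */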

#define seq_nr_before(a, b)		seq_nr_after((b), (a))
#define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))

bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
{
	struct hsr_node *node;

	node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node,
				      mac_list);
	if (!node) {
		WARN_ONCE(1, "HSR: No self node\n");
		return false;
	}

	if (ether_addr_equal(addr, node->macaddress_A))
		return true;
	if (ether_addr_equal(addr, node->macaddress_B))
		return true;

	return false;
}

/* Search for mac entry. Caller must hold rcu read lock.
 */
static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
					    const unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, addr))
			return node;
	}

	return NULL;
}

/* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
 * frames from self that have been looped over the HSR ring.
 */
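/* Concurrency note: readers traverse self_node_db under rcu_read_lock() only,
 * so the entry is swapped in with list_replace_rcu() and the old one freed
 * with kfree_rcu() after a grace period, while writers serialize on
 * hsr->list_lock.
 */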
int hsr_create_self_node(struct hsr_priv *hsr,
			 const unsigned char addr_a[ETH_ALEN],
			 const unsigned char addr_b[ETH_ALEN])
{
	struct list_head *self_node_db = &hsr->self_node_db;
	struct hsr_node *node, *oldnode;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ether_addr_copy(node->macaddress_A, addr_a);
	ether_addr_copy(node->macaddress_B, addr_b);

	spin_lock_bh(&hsr->list_lock);
	oldnode = list_first_or_null_rcu(self_node_db,
					 struct hsr_node, mac_list);
	if (oldnode) {
		list_replace_rcu(&oldnode->mac_list, &node->mac_list);
		spin_unlock_bh(&hsr->list_lock);
		kfree_rcu(oldnode, rcu_head);
	} else {
		list_add_tail_rcu(&node->mac_list, self_node_db);
		spin_unlock_bh(&hsr->list_lock);
	}

	return 0;
}

void hsr_del_self_node(struct hsr_priv *hsr)
{
	struct list_head *self_node_db = &hsr->self_node_db;
	struct hsr_node *node;

	spin_lock_bh(&hsr->list_lock);
	node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
	if (node) {
		list_del_rcu(&node->mac_list);
		kfree_rcu(node, rcu_head);
	}
	spin_unlock_bh(&hsr->list_lock);
}

void hsr_del_nodes(struct list_head *node_db)
{
	struct hsr_node *node;
	struct hsr_node *tmp;

	list_for_each_entry_safe(node, tmp, node_db, mac_list)
		kfree(node);
}

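/* Remember on which LAN a SAN (singly attached node) was seen; the
 * forwarding code can use this to treat traffic to the node as plain
 * (non-PRP) traffic on that LAN.
 */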
void prp_handle_san_frame(bool san, enum hsr_port_type port,
			  struct hsr_node *node)
{
	/* Mark if the SAN node is over LAN_A or LAN_B */
	if (port == HSR_PT_SLAVE_A) {
		node->san_a = true;
		return;
	}

	if (port == HSR_PT_SLAVE_B)
		node->san_b = true;
}

/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
 * seq_out is used to initialize filtering of outgoing duplicate frames
 * originating from the newly added node.
 */
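/* (The new entry is allocated and initialized before hsr->list_lock is
 * taken, and the list is then re-checked under the lock; if another CPU has
 * added the same address meanwhile, the fresh allocation is dropped and the
 * existing node returned instead.)
 */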
static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
				     struct list_head *node_db,
				     unsigned char addr[],
				     u16 seq_out, bool san,
				     enum hsr_port_type rx_port)
{
	struct hsr_node *new_node, *node;
	unsigned long now;
	int i;

	new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
	if (!new_node)
		return NULL;

	ether_addr_copy(new_node->macaddress_A, addr);

	/* We are only interested in time diffs here, so use current jiffies
	 * as initialization. (0 could trigger a spurious ring error warning.)
	 */
	now = jiffies;
	for (i = 0; i < HSR_PT_PORTS; i++) {
		new_node->time_in[i] = now;
		new_node->time_out[i] = now;
	}
	for (i = 0; i < HSR_PT_PORTS; i++)
		new_node->seq_out[i] = seq_out;

	if (san && hsr->proto_ops->handle_san_frame)
		hsr->proto_ops->handle_san_frame(san, rx_port, new_node);

	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_rcu(node, node_db, mac_list,
				lockdep_is_held(&hsr->list_lock)) {
		if (ether_addr_equal(node->macaddress_A, addr))
			goto out;
		if (ether_addr_equal(node->macaddress_B, addr))
			goto out;
	}
	list_add_tail_rcu(&new_node->mac_list, node_db);
	spin_unlock_bh(&hsr->list_lock);
	return new_node;
out:
	spin_unlock_bh(&hsr->list_lock);
	kfree(new_node);
	return node;
}

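/* A node we receive a supervision frame from is a proper PRP node (DAN-P),
 * not a SAN, so clear any SAN marking recorded before its first supervision
 * frame was seen.
 */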
void prp_update_san_info(struct hsr_node *node, bool is_sup)
{
	if (!is_sup)
		return;

	node->san_a = false;
	node->san_b = false;
}

/* Get the hsr_node from which 'skb' was sent.
 */
struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
			      struct sk_buff *skb, bool is_sup,
			      enum hsr_port_type rx_port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_node *node;
	struct ethhdr *ethhdr;
	struct prp_rct *rct;
	bool san = false;
	u16 seq_out;

	if (!skb_mac_header_was_set(skb))
		return NULL;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
		if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
	}

	/* Any node that is connected to this HSR/PRP device may get a node
	 * entry created for it here.
	 */
	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		/* Use the existing sequence_nr from the tag as starting point
		 * for filtering duplicate frames.
		 */
		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
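		/* (Starting one below the received sequence_nr makes this
		 * very frame count as new, so it still passes the outgoing
		 * duplicate filter in hsr_register_frame_out().)
		 */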
	} else {
		rct = skb_get_PRP_rct(skb);
		if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
			seq_out = prp_get_skb_sequence_nr(rct);
		} else {
			if (rx_port != HSR_PT_MASTER)
				san = true;
			seq_out = HSR_SEQNR_START;
		}
	}

	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
			    san, rx_port);
}

/* Use the Supervision frame's info about a possible macaddress_B for merging
 * nodes that have previously had their macaddress_B registered as a separate
 * node.
 */
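/* Layout of the supervision frames parsed below (per IEC 62439-3):
 *
 *   ethhdr | hsr_tag (HSR-tagged frames only) | hsr_sup_tag |
 *   hsr_sup_payload (macaddress_A) | optionally: hsr_sup_tlv +
 *   a second hsr_sup_payload (the RedBox MAC)
 *
 * The headers are pulled off one by one, and the total is pushed back at
 * 'done:' so the skb is left as it was received.
 */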
void hsr_handle_sup_frame(struct hsr_frame_info *frame)
{
	struct hsr_node *node_curr = frame->node_src;
	struct hsr_port *port_rcv = frame->port_rcv;
	struct hsr_priv *hsr = port_rcv->hsr;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tlv *hsr_sup_tlv;
	struct hsr_node *node_real;
	struct sk_buff *skb = NULL;
	struct list_head *node_db;
	struct ethhdr *ethhdr;
	int i;
	unsigned int pull_size = 0;
	unsigned int total_pull_size = 0;

	/* Here one of frame->skb_hsr, frame->skb_prp or frame->skb_std
	 * should be valid, as a supervision frame always carries the
	 * protocol header info.
	 */
	if (frame->skb_hsr)
		skb = frame->skb_hsr;
	else if (frame->skb_prp)
		skb = frame->skb_prp;
	else if (frame->skb_std)
		skb = frame->skb_std;
	if (!skb)
		return;

	/* Leave the ethernet header. */
	pull_size = sizeof(struct ethhdr);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* And leave the HSR tag. */
	if (ethhdr->h_proto == htons(ETH_P_HSR)) {
		pull_size = sizeof(struct hsr_tag);
		skb_pull(skb, pull_size);
		total_pull_size += pull_size;
	}

	/* And leave the HSR sup tag. */
	pull_size = sizeof(struct hsr_sup_tag);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	/* get HSR sup payload */
	hsr_sp = (struct hsr_sup_payload *)skb->data;

	/* Merge node_curr (registered on macaddress_B) into node_real */
	node_db = &port_rcv->hsr->node_db;
	node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
	if (!node_real)
		/* No frame received from AddrA of this node yet */
		node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
					 HSR_SEQNR_START - 1, true,
					 port_rcv->type);
	if (!node_real)
		goto done; /* No mem */
	if (node_real == node_curr)
		/* Node has already been merged */
		goto done;

	/* Leave the first HSR sup payload. */
	pull_size = sizeof(struct hsr_sup_payload);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	/* Get second supervision tlv */
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	/* And check if it is a redbox mac TLV */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* We could stop here after pushing hsr_sup_payload,
		 * or proceed and allow a macaddress_B for redboxes as well.
		 */
		/* Sanity check length */
		if (hsr_sup_tlv->HSR_TLV_length != 6)
			goto done;

		/* Leave the second HSR sup tlv. */
		pull_size = sizeof(struct hsr_sup_tlv);
		skb_pull(skb, pull_size);
		total_pull_size += pull_size;

		/* Get redbox mac address. */
		hsr_sp = (struct hsr_sup_payload *)skb->data;

		/* Check if redbox mac and node mac are equal. */
		if (!ether_addr_equal(node_real->macaddress_A,
				      hsr_sp->macaddress_A)) {
			/* This is a redbox supervision frame for a VDAN! */
			goto done;
		}
	}

	ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
	for (i = 0; i < HSR_PT_PORTS; i++) {
		if (!node_curr->time_in_stale[i] &&
		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
			node_real->time_in[i] = node_curr->time_in[i];
			node_real->time_in_stale[i] =
						node_curr->time_in_stale[i];
		}
		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
			node_real->seq_out[i] = node_curr->seq_out[i];
	}
	node_real->addr_B_port = port_rcv->type;

	spin_lock_bh(&hsr->list_lock);
	list_del_rcu(&node_curr->mac_list);
	spin_unlock_bh(&hsr->list_lock);
	kfree_rcu(node_curr, rcu_head);

done:
	/* Push back here */
	skb_push(skb, total_pull_size);
}

/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
 *
 * If the frame was sent by a node's B interface, replace the source
 * address with that node's "official" address (macaddress_A) so that upper
 * layers recognize where it came from.
 */
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
{
	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
}

/* 'skb' is a frame meant for another host.
 * 'port' is the outgoing interface
 *
 * Substitute the target (dest) MAC address if necessary, so that it matches
 * the recipient interface MAC address, regardless of whether that is the
 * recipient's A or B interface.
 * This is needed to keep the packets flowing through switches that learn on
 * which "side" the different interfaces are.
 */
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
			 struct hsr_port *port)
{
	struct hsr_node *node_dst;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
		return;

	node_dst = find_node_by_addr_A(&port->hsr->node_db,
				       eth_hdr(skb)->h_dest);
	if (!node_dst) {
		if (net_ratelimit())
			netdev_err(skb->dev, "%s: Unknown node\n", __func__);
		return;
	}
	if (port->type != node_dst->addr_B_port)
		return;

	if (is_valid_ether_addr(node_dst->macaddress_B))
		ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
}

void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
			   u16 sequence_nr)
{
	/* Don't register incoming frames without a valid sequence number.
	 * This ensures entries of restarted nodes get pruned so that they
	 * can re-register and resume communications. The check is skipped
	 * when tag removal is offloaded (NETIF_F_HW_HSR_TAG_RM), since no
	 * valid sequence number is seen here in that case.
	 */
	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    seq_nr_before(sequence_nr, node->seq_out[port->type]))
		return;

	node->time_in[port->type] = jiffies;
	node->time_in_stale[port->type] = false;
}

/* 'skb' is a HSR Ethernet frame (with a HSR tag inserted), with a valid
 * ethhdr->h_source address and skb->mac_header set.
 *
 * Return:
 *	 1 if frame can be shown to have been sent recently on this interface,
 *	 0 otherwise, or
 *	 negative error code on error
 */
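/* For instance: once sequence_nr 100 has been registered as sent on a port,
 * another attempt to send sequence_nr 100 (or an earlier number) on that
 * port within HSR_ENTRY_FORGET_TIME is reported as a duplicate. When the
 * entry has not been touched for HSR_ENTRY_FORGET_TIME, it is considered
 * stale and the sequence number is accepted and registered anew.
 */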
int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
			   u16 sequence_nr)
{
	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
	    time_is_after_jiffies(node->time_out[port->type] +
	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME)))
		return 1;

	node->time_out[port->type] = jiffies;
	node->seq_out[port->type] = sequence_nr;
	return 0;
}

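/* Return the slave port on which frames from 'node' have stopped arriving,
 * or are lagging more than MAX_SLAVE_DIFF behind the other slave; NULL if
 * neither slave qualifies. Used for ring error reporting.
 */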
static struct hsr_port *get_late_port(struct hsr_priv *hsr,
				      struct hsr_node *node)
{
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	if (time_after(node->time_in[HSR_PT_SLAVE_B],
		       node->time_in[HSR_PT_SLAVE_A] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (time_after(node->time_in[HSR_PT_SLAVE_A],
		       node->time_in[HSR_PT_SLAVE_B] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	return NULL;
}

/* Remove stale sequence_nr records. Called by timer every
 * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
 */
void hsr_prune_nodes(struct timer_list *t)
{
	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
	struct hsr_node *node;
	struct hsr_node *tmp;
	struct hsr_port *port;
	unsigned long timestamp;
	unsigned long time_a, time_b;

	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
		/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
		 * nor time_in[HSR_PT_SLAVE_B] will ever be updated for
		 * the master port. Thus the master node would be repeatedly
		 * pruned, leading to packet loss.
		 */
		if (hsr_addr_is_self(hsr, node->macaddress_A))
			continue;

		/* Shorthand */
		time_a = node->time_in[HSR_PT_SLAVE_A];
		time_b = node->time_in[HSR_PT_SLAVE_B];

		/* Check for timestamps old enough to risk wrap-around */
		if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
			node->time_in_stale[HSR_PT_SLAVE_A] = true;
		if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
			node->time_in_stale[HSR_PT_SLAVE_B] = true;

		/* Get age of newest frame from node.
		 * At least one time_in is OK here; nodes get pruned long
		 * before both time_ins can get stale
		 */
		timestamp = time_a;
		if (node->time_in_stale[HSR_PT_SLAVE_A] ||
		    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
		    time_after(time_b, time_a)))
			timestamp = time_b;

		/* Warn of ring error only as long as we get frames at all */
		if (time_is_after_jiffies(timestamp +
				msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
			rcu_read_lock();
			port = get_late_port(hsr, node);
			if (port)
				hsr_nl_ringerror(hsr, node->macaddress_A, port);
			rcu_read_unlock();
		}

		/* Prune old entries */
		if (time_is_before_jiffies(timestamp +
				msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
			hsr_nl_nodedown(hsr, node->macaddress_A);
			list_del_rcu(&node->mac_list);
			/* Note that we need to free this entry later: */
			kfree_rcu(node, rcu_head);
		}
	}
	spin_unlock_bh(&hsr->list_lock);

	/* Restart timer */
	mod_timer(&hsr->prune_timer,
		  jiffies + msecs_to_jiffies(PRUNE_PERIOD));
}

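/* Iterate over known nodes: with '_pos' == NULL return the first node in
 * node_db, otherwise the node following '_pos'; macaddress_A of the returned
 * node is copied to 'addr'. Caller must hold the rcu read lock.
 */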
void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
			unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	if (!_pos) {
		node = list_first_or_null_rcu(&hsr->node_db,
					      struct hsr_node, mac_list);
		if (node)
			ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	node = _pos;
	list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
		ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	return NULL;
}

int hsr_get_node_data(struct hsr_priv *hsr,
		      const unsigned char *addr,
		      unsigned char addr_b[ETH_ALEN],
		      unsigned int *addr_b_ifindex,
		      int *if1_age,
		      u16 *if1_seq,
		      int *if2_age,
		      u16 *if2_seq)
{
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long tdiff;

	node = find_node_by_addr_A(&hsr->node_db, addr);
	if (!node)
		return -ENOENT;

	ether_addr_copy(addr_b, node->macaddress_B);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		*if1_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if1_age = INT_MAX;
#endif
	else
		*if1_age = jiffies_to_msecs(tdiff);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		*if2_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if2_age = INT_MAX;
#endif
	else
		*if2_age = jiffies_to_msecs(tdiff);

	/* Present sequence numbers as if they were incoming on interface */
	*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
	*if2_seq = node->seq_out[HSR_PT_SLAVE_A];

	if (node->addr_B_port != HSR_PT_NONE) {
		port = hsr_port_get_hsr(hsr, node->addr_B_port);
		*addr_b_ifindex = port->dev->ifindex;
	} else {
		*addr_b_ifindex = -1;
	}

	return 0;
}