// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * The HSR spec says never to forward the same frame twice on the same
 * interface. A frame is identified by its source MAC address and its HSR
 * sequence number. This code keeps track of senders and their sequence numbers
 * to allow filtering of duplicate frames, and to detect HSR ring errors.
 * Same code handles filtering of duplicates for PRP as well.
 */

#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include "hsr_main.h"
#include "hsr_framereg.h"
#include "hsr_netlink.h"

/* TODO: use hash lists for mac addresses (linux/jhash.h)? */

/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
 * false otherwise.
 */
static bool seq_nr_after(u16 a, u16 b)
{
	/* Remove inconsistency where
	 * seq_nr_after(a, b) == seq_nr_before(a, b)
	 */
	if ((int)b - a == 32768)
		return false;

	return (((s16)(b - a)) < 0);
}

#define seq_nr_before(a, b)		seq_nr_after((b), (a))
#define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))
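
/* Worked example of the wrap-around comparison above: sequence numbers are
 * 16 bits and wrap, so ordering is decided by the signed difference.
 * seq_nr_after(1, 65535) evaluates (s16)(65535 - 1) == -2 < 0, i.e. 1 is
 * considered to come after 65535 across the wrap. The explicit 32768 check
 * breaks the tie when the two numbers are exactly half the sequence space
 * apart, where otherwise both seq_nr_after(a, b) and seq_nr_after(b, a)
 * would be true.
 */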

bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
{
	struct hsr_node *node;

	node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node,
				      mac_list);
	if (!node) {
		WARN_ONCE(1, "HSR: No self node\n");
		return false;
	}

	if (ether_addr_equal(addr, node->macaddress_A))
		return true;
	if (ether_addr_equal(addr, node->macaddress_B))
		return true;

	return false;
}

/* Search for mac entry. Caller must hold rcu read lock.
 */
static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
					    const unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, addr))
			return node;
	}

	return NULL;
}

/* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
 * frames from self that have been looped over the HSR ring.
 */
int hsr_create_self_node(struct hsr_priv *hsr,
			 unsigned char addr_a[ETH_ALEN],
			 unsigned char addr_b[ETH_ALEN])
{
	struct list_head *self_node_db = &hsr->self_node_db;
	struct hsr_node *node, *oldnode;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ether_addr_copy(node->macaddress_A, addr_a);
	ether_addr_copy(node->macaddress_B, addr_b);

	spin_lock_bh(&hsr->list_lock);
	oldnode = list_first_or_null_rcu(self_node_db,
					 struct hsr_node, mac_list);
	if (oldnode) {
		list_replace_rcu(&oldnode->mac_list, &node->mac_list);
		spin_unlock_bh(&hsr->list_lock);
		kfree_rcu(oldnode, rcu_head);
	} else {
		list_add_tail_rcu(&node->mac_list, self_node_db);
		spin_unlock_bh(&hsr->list_lock);
	}

	return 0;
}
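
/* A note on the update pattern above: the new self node is published with
 * list_replace_rcu()/list_add_tail_rcu() while holding hsr->list_lock, and
 * the old entry is only reclaimed via kfree_rcu(). Lockless readers such as
 * hsr_addr_is_self() therefore always observe either the old or the new
 * entry, never a freed one.
 */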

void hsr_del_self_node(struct hsr_priv *hsr)
{
	struct list_head *self_node_db = &hsr->self_node_db;
	struct hsr_node *node;

	spin_lock_bh(&hsr->list_lock);
	node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
	if (node) {
		list_del_rcu(&node->mac_list);
		kfree_rcu(node, rcu_head);
	}
	spin_unlock_bh(&hsr->list_lock);
}

void hsr_del_nodes(struct list_head *node_db)
{
	struct hsr_node *node;
	struct hsr_node *tmp;

	list_for_each_entry_safe(node, tmp, node_db, mac_list)
		kfree(node);
}

/* Allocate an hsr_node and add it to node_db. 'addr' is the node's
 * macaddress_A; seq_out is used to initialize filtering of outgoing
 * duplicate frames originating from the newly added node.
 */
static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
				     struct list_head *node_db,
				     unsigned char addr[],
				     u16 seq_out)
{
	struct hsr_node *new_node, *node;
	unsigned long now;
	int i;

	new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
	if (!new_node)
		return NULL;

	ether_addr_copy(new_node->macaddress_A, addr);

	/* We are only interested in time diffs here, so use current jiffies
	 * as initialization. (0 could trigger a spurious ring error warning).
	 */
	now = jiffies;
	for (i = 0; i < HSR_PT_PORTS; i++)
		new_node->time_in[i] = now;
	for (i = 0; i < HSR_PT_PORTS; i++)
		new_node->seq_out[i] = seq_out;

	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_rcu(node, node_db, mac_list,
				lockdep_is_held(&hsr->list_lock)) {
		if (ether_addr_equal(node->macaddress_A, addr))
			goto out;
		if (ether_addr_equal(node->macaddress_B, addr))
			goto out;
	}
	list_add_tail_rcu(&new_node->mac_list, node_db);
	spin_unlock_bh(&hsr->list_lock);
	return new_node;
out:
	spin_unlock_bh(&hsr->list_lock);
	kfree(new_node);
	return node;
}
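
/* The re-check of the list under list_lock above means that two CPUs racing
 * to add the same address converge on a single entry: the loser frees its
 * freshly allocated node and returns the entry already on the list.
 */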

/* Get the hsr_node from which 'skb' was sent.
 */
struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
			      bool is_sup)
{
	struct list_head *node_db = &port->hsr->node_db;
	struct hsr_priv *hsr = port->hsr;
	struct hsr_node *node;
	struct ethhdr *ethhdr;
	u16 seq_out;

	if (!skb_mac_header_was_set(skb))
		return NULL;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source))
			return node;
		if (ether_addr_equal(node->macaddress_B, ethhdr->h_source))
			return node;
	}

	/* Anyone may create a node entry here, i.e. any node connected to
	 * an HSR device.
	 */

	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		/* Use the existing sequence_nr from the tag as starting point
		 * for filtering duplicate frames.
		 */
		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
	} else {
		/* This function is also called for frames from the master
		 * port, so only warn for non-master ports.
		 */
		if (port->type != HSR_PT_MASTER)
			WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
		seq_out = HSR_SEQNR_START;
	}

	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out);
}
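
/* Initializing seq_out to (sequence_nr - 1) for tagged frames means the frame
 * that caused this entry to be created is itself still accepted by
 * hsr_register_frame_out(), while a later copy carrying the same sequence
 * number toward the same outgoing port is treated as a duplicate.
 */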

/* Use the Supervision frame's info about a possible macaddress_B for merging
 * nodes that have previously had their macaddress_B registered as a separate
 * node.
 */
void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
			  struct hsr_port *port_rcv)
{
	struct hsr_priv *hsr = port_rcv->hsr;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_node *node_real;
	struct list_head *node_db;
	struct ethhdr *ethhdr;
	int i;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* Leave the ethernet header. */
	skb_pull(skb, sizeof(struct ethhdr));

	/* And leave the HSR tag. */
	if (ethhdr->h_proto == htons(ETH_P_HSR))
		skb_pull(skb, sizeof(struct hsr_tag));

	/* And leave the HSR sup tag. */
	skb_pull(skb, sizeof(struct hsr_sup_tag));

	hsr_sp = (struct hsr_sup_payload *)skb->data;

	/* Merge node_curr (registered on macaddress_B) into node_real */
	node_db = &port_rcv->hsr->node_db;
	node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
	if (!node_real)
		/* No frame received from AddrA of this node yet */
		node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
					 HSR_SEQNR_START - 1);
	if (!node_real)
		goto done; /* No mem */
	if (node_real == node_curr)
		/* Node has already been merged */
		goto done;

	ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
	for (i = 0; i < HSR_PT_PORTS; i++) {
		if (!node_curr->time_in_stale[i] &&
		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
			node_real->time_in[i] = node_curr->time_in[i];
			node_real->time_in_stale[i] =
						node_curr->time_in_stale[i];
		}
		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
			node_real->seq_out[i] = node_curr->seq_out[i];
	}
	node_real->addr_B_port = port_rcv->type;

	spin_lock_bh(&hsr->list_lock);
	list_del_rcu(&node_curr->mac_list);
	spin_unlock_bh(&hsr->list_lock);
	kfree_rcu(node_curr, rcu_head);

done:
	skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
}
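
/* For reference, the supervision frame parsed above is laid out as
 * [ethhdr][hsr_tag (ETH_P_HSR only)][hsr_sup_tag][hsr_sup_payload ...];
 * the skb_pull() calls step past the headers so that skb->data points at
 * the payload carrying macaddress_A, and the skb_push() at 'done' moves
 * the data pointer back toward the headers before the skb is handed back.
 */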

/* 'skb' is a frame meant for this host and is to be passed to upper layers.
 *
 * If the frame was sent by a node's B interface, replace the source
 * address with that node's "official" address (macaddress_A) so that upper
 * layers recognize where it came from.
 */
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
{
	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
}

/* 'skb' is a frame meant for another host.
 * 'port' is the outgoing interface
 *
 * Substitute the target (dest) MAC address if necessary, so that it matches
 * the recipient interface MAC address, regardless of whether that is the
 * recipient's A or B interface.
 * This is needed to keep the packets flowing through switches that learn on
 * which "side" the different interfaces are.
 */
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
			 struct hsr_port *port)
{
	struct hsr_node *node_dst;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
		return;

	node_dst = find_node_by_addr_A(&port->hsr->node_db,
				       eth_hdr(skb)->h_dest);
	if (!node_dst) {
		if (net_ratelimit())
			netdev_err(skb->dev, "%s: Unknown node\n", __func__);
		return;
	}
	if (port->type != node_dst->addr_B_port)
		return;

	if (is_valid_ether_addr(node_dst->macaddress_B))
		ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
}

void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
			   u16 sequence_nr)
{
	/* Don't register incoming frames without a valid sequence number. This
	 * ensures entries of restarted nodes get pruned so that they can
	 * re-register and resume communications.
	 */
	if (seq_nr_before(sequence_nr, node->seq_out[port->type]))
		return;

	node->time_in[port->type] = jiffies;
	node->time_in_stale[port->type] = false;
}

/* 'skb' is an HSR Ethernet frame (with an HSR tag inserted), with a valid
 * ethhdr->h_source address and skb->mac_header set.
 *
 * Return:
 *	 1 if frame can be shown to have been sent recently on this interface,
 *	 0 otherwise, or
 *	 negative error code on error
 */
int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
			   u16 sequence_nr)
{
	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]))
		return 1;

	node->seq_out[port->type] = sequence_nr;
	return 0;
}
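
/* Example of the duplicate filter above: if node->seq_out[port->type] is 10,
 * a frame with sequence_nr 10 is reported as already sent (return 1), a frame
 * with sequence_nr 11 is accepted and advances seq_out to 11, and the ring
 * copy of that same frame arriving later via the other slave is then reported
 * as a duplicate for this outgoing port.
 */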

static struct hsr_port *get_late_port(struct hsr_priv *hsr,
				      struct hsr_node *node)
{
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	if (time_after(node->time_in[HSR_PT_SLAVE_B],
		       node->time_in[HSR_PT_SLAVE_A] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (time_after(node->time_in[HSR_PT_SLAVE_A],
		       node->time_in[HSR_PT_SLAVE_B] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	return NULL;
}

/* Remove stale sequence_nr records. Called by timer every
 * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
 */
void hsr_prune_nodes(struct timer_list *t)
{
	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
	struct hsr_node *node;
	struct hsr_node *tmp;
	struct hsr_port *port;
	unsigned long timestamp;
	unsigned long time_a, time_b;

	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
		/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
		 * nor time_in[HSR_PT_SLAVE_B] will ever be updated for
		 * the master port; without this check the master node would
		 * be repeatedly pruned, leading to packet loss.
		 */
		if (hsr_addr_is_self(hsr, node->macaddress_A))
			continue;

		/* Shorthand */
		time_a = node->time_in[HSR_PT_SLAVE_A];
		time_b = node->time_in[HSR_PT_SLAVE_B];

		/* Check for timestamps old enough to risk wrap-around */
		if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
			node->time_in_stale[HSR_PT_SLAVE_A] = true;
		if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
			node->time_in_stale[HSR_PT_SLAVE_B] = true;

		/* Get age of newest frame from node.
		 * At least one time_in is OK here; nodes get pruned long
		 * before both time_ins can get stale
		 */
		timestamp = time_a;
		if (node->time_in_stale[HSR_PT_SLAVE_A] ||
		    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
		     time_after(time_b, time_a)))
			timestamp = time_b;

		/* Warn of ring error only as long as we get frames at all */
		if (time_is_after_jiffies(timestamp +
				msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
			rcu_read_lock();
			port = get_late_port(hsr, node);
			if (port)
				hsr_nl_ringerror(hsr, node->macaddress_A, port);
			rcu_read_unlock();
		}

		/* Prune old entries */
		if (time_is_before_jiffies(timestamp +
				msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
			hsr_nl_nodedown(hsr, node->macaddress_A);
			list_del_rcu(&node->mac_list);
			/* Note that we need to free this entry later: */
			kfree_rcu(node, rcu_head);
		}
	}
	spin_unlock_bh(&hsr->list_lock);

	/* Restart timer */
	mod_timer(&hsr->prune_timer,
		  jiffies + msecs_to_jiffies(PRUNE_PERIOD));
}

void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
			unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	if (!_pos) {
		node = list_first_or_null_rcu(&hsr->node_db,
					      struct hsr_node, mac_list);
		if (node)
			ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	node = _pos;
	list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
		ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	return NULL;
}

int hsr_get_node_data(struct hsr_priv *hsr,
		      const unsigned char *addr,
		      unsigned char addr_b[ETH_ALEN],
		      unsigned int *addr_b_ifindex,
		      int *if1_age,
		      u16 *if1_seq,
		      int *if2_age,
		      u16 *if2_seq)
{
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long tdiff;

	node = find_node_by_addr_A(&hsr->node_db, addr);
	if (!node)
		return -ENOENT;

	ether_addr_copy(addr_b, node->macaddress_B);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		*if1_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if1_age = INT_MAX;
#endif
	else
		*if1_age = jiffies_to_msecs(tdiff);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		*if2_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if2_age = INT_MAX;
#endif
	else
		*if2_age = jiffies_to_msecs(tdiff);

	/* Present sequence numbers as if they were incoming on interface */
	*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
	*if2_seq = node->seq_out[HSR_PT_SLAVE_A];

	if (node->addr_B_port != HSR_PT_NONE) {
		port = hsr_port_get_hsr(hsr, node->addr_B_port);
		*addr_b_ifindex = port->dev->ifindex;
	} else {
		*addr_b_ifindex = -1;
	}

	return 0;
}