blob: 53d1f7a8246308e437b7b8a20a6f677e891b5e1c [file] [log] [blame]
Murali Karicheri0e7623b2019-04-05 13:31:34 -04001/* SPDX-License-Identifier: GPL-2.0 */
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02002/* Copyright 2011-2014 Autronica Fire and Security AS
Arvid Brodinf4214362013-10-30 21:10:47 +01003 *
Arvid Brodinf4214362013-10-30 21:10:47 +01004 * Author(s):
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02005 * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
Murali Karicheri8f4c0e02020-07-22 10:40:16 -04006 *
7 * include file for HSR and PRP.
Arvid Brodinf4214362013-10-30 21:10:47 +01008 */
9
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020010#ifndef __HSR_PRIVATE_H
11#define __HSR_PRIVATE_H
Arvid Brodinf4214362013-10-30 21:10:47 +010012
13#include <linux/netdevice.h>
14#include <linux/list.h>
Murali Karicheri451d8122020-07-22 10:40:21 -040015#include <linux/if_vlan.h>
George McCollisterdcf0cd12021-02-09 19:02:11 -060016#include <linux/if_hsr.h>
Arvid Brodinf4214362013-10-30 21:10:47 +010017
Arvid Brodinf4214362013-10-30 21:10:47 +010018/* Time constants as specified in the HSR specification (IEC-62439-3 2010)
19 * Table 8.
20 * All values in milliseconds.
21 */
22#define HSR_LIFE_CHECK_INTERVAL 2000 /* ms */
23#define HSR_NODE_FORGET_TIME 60000 /* ms */
24#define HSR_ANNOUNCE_INTERVAL 100 /* ms */
Marco Wenzelf1764112021-02-24 10:46:49 +010025#define HSR_ENTRY_FORGET_TIME 400 /* ms */
Arvid Brodinf4214362013-10-30 21:10:47 +010026
Arvid Brodinf4214362013-10-30 21:10:47 +010027/* By how much may slave1 and slave2 timestamps of latest received frame from
28 * each node differ before we notify of communication problem?
29 */
30#define MAX_SLAVE_DIFF 3000 /* ms */
Arvid Brodinf266a682014-07-04 23:41:03 +020031#define HSR_SEQNR_START (USHRT_MAX - 1024)
Peter Heiseee1c2792016-04-13 13:52:22 +020032#define HSR_SUP_SEQNR_START (HSR_SEQNR_START / 2)
Arvid Brodinf4214362013-10-30 21:10:47 +010033
Arvid Brodinf4214362013-10-30 21:10:47 +010034/* How often shall we check for broken ring and remove node entries older than
35 * HSR_NODE_FORGET_TIME?
36 */
37#define PRUNE_PERIOD 3000 /* ms */
38
Arvid Brodinf4214362013-10-30 21:10:47 +010039#define HSR_TLV_ANNOUNCE 22
40#define HSR_TLV_LIFE_CHECK 23
Murali Karicheric643ff02020-07-22 10:40:19 -040041/* PRP V1 life check for Duplicate discard */
42#define PRP_TLV_LIFE_CHECK_DD 20
43/* PRP V1 life check for Duplicate Accept */
44#define PRP_TLV_LIFE_CHECK_DA 21
Arvid Brodinf4214362013-10-30 21:10:47 +010045
Arvid Brodinf4214362013-10-30 21:10:47 +010046/* HSR Tag.
47 * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
48 * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
49 * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
50 * encapsulated protocol } instead.
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020051 *
52 * Field names as defined in the IEC:2010 standard for HSR.
Arvid Brodinf4214362013-10-30 21:10:47 +010053 */
/* On-the-wire HSR tag: 4-bit path and 12-bit LSDU_size share the first
 * 16-bit field (see set_hsr_tag_path()/set_hsr_tag_LSDU_size() below),
 * followed by the sequence number and the encapsulated EtherType.
 * All fields are big-endian.
 */
struct hsr_tag {
        __be16          path_and_LSDU_size;
        __be16          sequence_nr;
        __be16          encap_proto;
} __packed;
59
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020060#define HSR_HLEN 6
Arvid Brodinf4214362013-10-30 21:10:47 +010061
Peter Heiseee1c2792016-04-13 13:52:22 +020062#define HSR_V1_SUP_LSDUSIZE 52
63
Arvid Brodinf4214362013-10-30 21:10:47 +010064/* The helper functions below assumes that 'path' occupies the 4 most
65 * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
66 * equivalently, the 4 most significant bits of HSR tag byte 14).
67 *
68 * This is unclear in the IEC specification; its definition of MAC addresses
69 * indicates the spec is written with the least significant bit first (to the
70 * left). This, however, would mean that the LSDU field would be split in two
71 * with the path field in-between, which seems strange. I'm guessing the MAC
72 * address definition is in error.
73 */
Arvid Brodinf4214362013-10-30 21:10:47 +010074
75static inline void set_hsr_tag_path(struct hsr_tag *ht, u16 path)
76{
Murali Karicherid595b852019-04-05 13:31:23 -040077 ht->path_and_LSDU_size =
78 htons((ntohs(ht->path_and_LSDU_size) & 0x0FFF) | (path << 12));
Arvid Brodinf4214362013-10-30 21:10:47 +010079}
80
81static inline void set_hsr_tag_LSDU_size(struct hsr_tag *ht, u16 LSDU_size)
82{
Murali Karicheri0525fc02019-04-05 13:31:27 -040083 ht->path_and_LSDU_size = htons((ntohs(ht->path_and_LSDU_size) &
84 0xF000) | (LSDU_size & 0x0FFF));
Arvid Brodinf4214362013-10-30 21:10:47 +010085}
86
/* Ethernet header immediately followed by the HSR tag */
struct hsr_ethhdr {
        struct ethhdr   ethhdr;
        struct hsr_tag  hsr_tag;
} __packed;
91
/* VLAN-tagged Ethernet header followed by the HSR tag */
struct hsr_vlan_ethhdr {
        struct vlan_ethhdr      vlanhdr;
        struct hsr_tag          hsr_tag;
} __packed;
96
97/* HSR/PRP Supervision Frame data types.
Arvid Brodinf4214362013-10-30 21:10:47 +010098 * Field names as defined in the IEC:2010 standard for HSR.
99 */
/* Supervision frame tag. The leading 16-bit field has the same layout as
 * hsr_tag's path_and_LSDU_size, which is why set_hsr_stag_path() and
 * set_hsr_stag_HSR_ver() can reuse the hsr_tag helpers via a cast.
 */
struct hsr_sup_tag {
        __be16          path_and_HSR_ver;
        __be16          sequence_nr;
        __u8            HSR_TLV_type;
        __u8            HSR_TLV_length;
} __packed;
106
/* Supervision frame payload: carries MacAddressA of the sending node */
struct hsr_sup_payload {
        unsigned char   macaddress_A[ETH_ALEN];
} __packed;
110
/* hsr_sup_tag's path_and_HSR_ver shares the bit layout of hsr_tag's
 * path_and_LSDU_size, so the hsr_tag helper is reused via a cast.
 */
static inline void set_hsr_stag_path(struct hsr_sup_tag *hst, u16 path)
{
        set_hsr_tag_path((struct hsr_tag *)hst, path);
}
115
/* Store the HSR version in the low 12 bits of path_and_HSR_ver; reuses
 * the LSDU_size helper because the two fields share the same layout.
 */
static inline void set_hsr_stag_HSR_ver(struct hsr_sup_tag *hst, u16 HSR_ver)
{
        set_hsr_tag_LSDU_size((struct hsr_tag *)hst, HSR_ver);
}
120
/* HSRv0 supervision frame header: no HSR tag before the supervision tag */
struct hsrv0_ethhdr_sp {
        struct ethhdr           ethhdr;
        struct hsr_sup_tag      hsr_sup;
} __packed;
125
/* HSRv1 supervision frame header: an HSR tag precedes the supervision tag */
struct hsrv1_ethhdr_sp {
        struct ethhdr           ethhdr;
        struct hsr_tag          hsr;
        struct hsr_sup_tag      hsr_sup;
} __packed;
131
/* Role a port plays within an HSR/PRP device */
enum hsr_port_type {
        HSR_PT_NONE = 0,        /* Must be 0, used by framereg */
        HSR_PT_SLAVE_A,
        HSR_PT_SLAVE_B,
        HSR_PT_INTERLINK,
        HSR_PT_MASTER,
        HSR_PT_PORTS,   /* This must be the last item in the enum */
};
Arvid Brodinc5a75912014-07-04 23:38:05 +0200140
/* PRP Redundancy Control Trailer (RCT).
 * As defined in IEC-62439-4:2012, the PRP RCT is really { sequence Nr,
 * LAN identifier (LanId), LSDU_size and PRP_suffix = 0x88FB }.
 *
 * Field names as defined in the IEC:2012 standard for PRP.
 */
struct prp_rct {
        __be16          sequence_nr;
        __be16          lan_id_and_LSDU_size;   /* 4-bit LanId + 12-bit LSDU_size */
        __be16          PRP_suffix;             /* 0x88FB (ETH_P_PRP) */
} __packed;
152
Murali Karicheri451d8122020-07-22 10:40:21 -0400153static inline u16 get_prp_LSDU_size(struct prp_rct *rct)
154{
155 return ntohs(rct->lan_id_and_LSDU_size) & 0x0FFF;
156}
157
158static inline void set_prp_lan_id(struct prp_rct *rct, u16 lan_id)
159{
160 rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) &
161 0x0FFF) | (lan_id << 12));
162}
Murali Karicheric643ff02020-07-22 10:40:19 -0400163static inline void set_prp_LSDU_size(struct prp_rct *rct, u16 LSDU_size)
164{
165 rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) &
166 0xF000) | (LSDU_size & 0x0FFF));
167}
168
/* One network interface attached to an hsr instance */
struct hsr_port {
        struct list_head        port_list;      /* entry in hsr_priv::ports */
        struct net_device       *dev;
        struct hsr_priv         *hsr;           /* owning device */
        enum hsr_port_type      type;
};
Arvid Brodinf4214362013-10-30 21:10:47 +0100175
Murali Karicherifa4dc892020-07-22 10:40:20 -0400176struct hsr_frame_info;
Murali Karicheri451d8122020-07-22 10:40:21 -0400177struct hsr_node;
Murali Karicherifa4dc892020-07-22 10:40:20 -0400178
/* Per-protocol operations; selected according to hsr_priv::prot_version
 * (HSR vs. PRP behavior differs in tagging and supervision handling).
 */
struct hsr_proto_ops {
        /* format and send supervision frame */
        void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval);
        void (*handle_san_frame)(bool san, enum hsr_port_type port,
                                 struct hsr_node *node);
        bool (*drop_frame)(struct hsr_frame_info *frame, struct hsr_port *port);
        struct sk_buff * (*get_untagged_frame)(struct hsr_frame_info *frame,
                                               struct hsr_port *port);
        struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame,
                                                struct hsr_port *port);
        int (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
                               struct hsr_frame_info *frame);
        bool (*invalid_dan_ingress_frame)(__be16 protocol);
        void (*update_san_info)(struct hsr_node *node, bool is_sup);
};
194
/* Private state of one HSR/PRP master device */
struct hsr_priv {
        struct rcu_head         rcu_head;
        struct list_head        ports;          /* hsr_port entries; see hsr_for_each_port() */
        struct list_head        node_db;        /* Known HSR nodes */
        struct list_head        self_node_db;   /* MACs of slaves */
        struct timer_list       announce_timer; /* Supervision frame dispatch */
        struct timer_list       prune_timer;
        int announce_count;
        u16 sequence_nr;
        u16 sup_sequence_nr;    /* For HSRv1 separate seq_nr for supervision */
        enum hsr_version prot_version;  /* Indicate if HSRv0, HSRv1 or PRPv1 */
        spinlock_t seqnr_lock;  /* locking for sequence_nr */
        spinlock_t list_lock;   /* locking for node list */
        struct hsr_proto_ops    *proto_ops;     /* HSR- or PRP-specific hooks */
#define PRP_LAN_ID      0x5     /* 0x1010 for A and 0x1011 for B. Bit 0 is set
                                 * based on SLAVE_A or SLAVE_B
                                 */
        u8 net_id;      /* for PRP, it occupies most significant 3 bits
                         * of lan_id
                         */
        unsigned char           sup_multicast_addr[ETH_ALEN] __aligned(sizeof(u16));
                                /* Align to u16 boundary to avoid unaligned access
                                 * in ether_addr_equal
                                 */
#ifdef CONFIG_DEBUG_FS
        struct dentry *node_tbl_root;
#endif
};
223
/* Iterate over all ports of 'hsr'; uses list_for_each_entry_rcu, so the
 * caller must be in an RCU read-side critical section.
 */
#define hsr_for_each_port(hsr, port) \
        list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
226
Arvid Brodinc5a75912014-07-04 23:38:05 +0200227struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
Arvid Brodinf4214362013-10-30 21:10:47 +0100228
Arvid Brodinf266a682014-07-04 23:41:03 +0200229/* Caller must ensure skb is a valid HSR frame */
230static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
231{
232 struct hsr_ethhdr *hsr_ethhdr;
233
Murali Karicheri5fa96772019-04-05 13:31:29 -0400234 hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
Arvid Brodinf266a682014-07-04 23:41:03 +0200235 return ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
236}
237
Murali Karicheri451d8122020-07-22 10:40:21 -0400238static inline struct prp_rct *skb_get_PRP_rct(struct sk_buff *skb)
239{
240 unsigned char *tail = skb_tail_pointer(skb) - HSR_HLEN;
241
242 struct prp_rct *rct = (struct prp_rct *)tail;
243
244 if (rct->PRP_suffix == htons(ETH_P_PRP))
245 return rct;
246
247 return NULL;
248}
249
/* Return the RCT's sequence number in host byte order.
 * Assume caller has confirmed this skb is PRP suffixed
 * (see skb_get_PRP_rct()).
 */
static inline u16 prp_get_skb_sequence_nr(struct prp_rct *rct)
{
        return ntohs(rct->sequence_nr);
}
255
256static inline u16 get_prp_lan_id(struct prp_rct *rct)
257{
258 return ntohs(rct->lan_id_and_LSDU_size) >> 12;
259}
260
261/* assume there is a valid rct */
262static inline bool prp_check_lsdu_size(struct sk_buff *skb,
263 struct prp_rct *rct,
264 bool is_sup)
265{
266 struct ethhdr *ethhdr;
267 int expected_lsdu_size;
268
269 if (is_sup) {
270 expected_lsdu_size = HSR_V1_SUP_LSDUSIZE;
271 } else {
272 ethhdr = (struct ethhdr *)skb_mac_header(skb);
273 expected_lsdu_size = skb->len - 14;
274 if (ethhdr->h_proto == htons(ETH_P_8021Q))
275 expected_lsdu_size -= 4;
276 }
277
278 return (expected_lsdu_size == get_prp_LSDU_size(rct));
279}
280
#if IS_ENABLED(CONFIG_DEBUG_FS)
void hsr_debugfs_rename(struct net_device *dev);
void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
void hsr_debugfs_term(struct hsr_priv *priv);
void hsr_debugfs_create_root(void);
void hsr_debugfs_remove_root(void);
#else
/* DEBUG_FS disabled: no-op stubs so callers need no #ifdef guards */
static inline void hsr_debugfs_rename(struct net_device *dev)
{
}
static inline void hsr_debugfs_init(struct hsr_priv *priv,
                                    struct net_device *hsr_dev)
{}
static inline void hsr_debugfs_term(struct hsr_priv *priv)
{}
static inline void hsr_debugfs_create_root(void)
{}
static inline void hsr_debugfs_remove_root(void)
{}
#endif
301
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200302#endif /* __HSR_PRIVATE_H */