blob: acab9c353a49f516c4a8b182265b9b70e8a5fdd1 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 */
7
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02008#ifndef __HSR_PRIVATE_H
9#define __HSR_PRIVATE_H
Arvid Brodinf4214362013-10-30 21:10:47 +010010
11#include <linux/netdevice.h>
12#include <linux/list.h>
13
/* Time constants as specified in the HSR specification (IEC-62439-3 2010)
 * Table 8.
 * All values in milliseconds.
 */
#define HSR_LIFE_CHECK_INTERVAL		 2000 /* ms */
#define HSR_NODE_FORGET_TIME		60000 /* ms; nodes silent this long are pruned, see PRUNE_PERIOD below */
#define HSR_ANNOUNCE_INTERVAL		  100 /* ms */
21
/* By how much may slave1 and slave2 timestamps of latest received frame from
 * each node differ before we notify of communication problem?
 */
#define MAX_SLAVE_DIFF			 3000 /* ms */
/* Starting value for the 16-bit sequence number counters; chosen near the
 * u16 wrap-around so wrap handling is exercised early.
 */
#define HSR_SEQNR_START			(USHRT_MAX - 1024)
#define HSR_SUP_SEQNR_START		(HSR_SEQNR_START / 2)
Arvid Brodinf4214362013-10-30 21:10:47 +010028
/* How often shall we check for broken ring and remove node entries older than
 * HSR_NODE_FORGET_TIME?
 */
#define PRUNE_PERIOD			 3000 /* ms */

/* TLV type values carried in supervision frames (HSR_TLV_type below) */
#define HSR_TLV_ANNOUNCE		   22
#define HSR_TLV_LIFE_CHECK		   23
36
/* HSR Tag.
 * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
 * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
 * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
 * encapsulated protocol } instead.
 *
 * Field names as defined in the IEC:2010 standard for HSR.
 */
struct hsr_tag {
	__be16		path_and_LSDU_size;	/* 4-bit path in the top nibble, 12-bit LSDU size below (see helpers) */
	__be16		sequence_nr;
	__be16		encap_proto;		/* EtherType of the encapsulated protocol */
} __packed;
50
#define HSR_HLEN	6	/* sizeof(struct hsr_tag): bytes the HSR tag adds to each frame */

#define HSR_V1_SUP_LSDUSIZE		52	/* LSDU size carried in HSRv1 supervision frames */
54
/* The helper functions below assume that 'path' occupies the 4 most
 * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
 * equivalently, the 4 most significant bits of HSR tag byte 14).
 *
 * This is unclear in the IEC specification; its definition of MAC addresses
 * indicates the spec is written with the least significant bit first (to the
 * left). This, however, would mean that the LSDU field would be split in two
 * with the path field in-between, which seems strange. I'm guessing the MAC
 * address definition is in error.
 */
65static inline u16 get_hsr_tag_path(struct hsr_tag *ht)
66{
67 return ntohs(ht->path_and_LSDU_size) >> 12;
68}
69
70static inline u16 get_hsr_tag_LSDU_size(struct hsr_tag *ht)
71{
72 return ntohs(ht->path_and_LSDU_size) & 0x0FFF;
73}
74
75static inline void set_hsr_tag_path(struct hsr_tag *ht, u16 path)
76{
Murali Karicherid595b852019-04-05 13:31:23 -040077 ht->path_and_LSDU_size =
78 htons((ntohs(ht->path_and_LSDU_size) & 0x0FFF) | (path << 12));
Arvid Brodinf4214362013-10-30 21:10:47 +010079}
80
81static inline void set_hsr_tag_LSDU_size(struct hsr_tag *ht, u16 LSDU_size)
82{
Murali Karicheri0525fc02019-04-05 13:31:27 -040083 ht->path_and_LSDU_size = htons((ntohs(ht->path_and_LSDU_size) &
84 0xF000) | (LSDU_size & 0x0FFF));
Arvid Brodinf4214362013-10-30 21:10:47 +010085}
86
/* Ethernet header immediately followed by the HSR tag. */
struct hsr_ethhdr {
	struct ethhdr	ethhdr;
	struct hsr_tag	hsr_tag;
} __packed;
91
/* HSR Supervision Frame data types.
 * Field names as defined in the IEC:2010 standard for HSR.
 */
struct hsr_sup_tag {
	__be16		path_and_HSR_ver;	/* Same bit layout as path_and_LSDU_size; see the stag helpers below */
	__be16		sequence_nr;
	__u8		HSR_TLV_type;		/* HSR_TLV_ANNOUNCE or HSR_TLV_LIFE_CHECK */
	__u8		HSR_TLV_length;
} __packed;
101
struct hsr_sup_payload {
	/* MacAddressA of the node the supervision frame describes
	 * (field name per the IEC standard).
	 */
	unsigned char	macaddress_A[ETH_ALEN];
} __packed;
105
106static inline u16 get_hsr_stag_path(struct hsr_sup_tag *hst)
107{
Murali Karicheri5fa96772019-04-05 13:31:29 -0400108 return get_hsr_tag_path((struct hsr_tag *)hst);
Arvid Brodinf4214362013-10-30 21:10:47 +0100109}
110
111static inline u16 get_hsr_stag_HSR_ver(struct hsr_sup_tag *hst)
112{
Murali Karicheri5fa96772019-04-05 13:31:29 -0400113 return get_hsr_tag_LSDU_size((struct hsr_tag *)hst);
Arvid Brodinf4214362013-10-30 21:10:47 +0100114}
115
116static inline void set_hsr_stag_path(struct hsr_sup_tag *hst, u16 path)
117{
Murali Karicheri5fa96772019-04-05 13:31:29 -0400118 set_hsr_tag_path((struct hsr_tag *)hst, path);
Arvid Brodinf4214362013-10-30 21:10:47 +0100119}
120
Murali Karicherib1b4aa92019-04-05 13:31:32 -0400121static inline void set_hsr_stag_HSR_ver(struct hsr_sup_tag *hst, u16 HSR_ver)
Arvid Brodinf4214362013-10-30 21:10:47 +0100122{
Murali Karicherib1b4aa92019-04-05 13:31:32 -0400123 set_hsr_tag_LSDU_size((struct hsr_tag *)hst, HSR_ver);
Arvid Brodinf4214362013-10-30 21:10:47 +0100124}
125
/* HSRv0 supervision frame: Ethernet header followed directly by the
 * supervision tag.
 */
struct hsrv0_ethhdr_sp {
	struct ethhdr		ethhdr;
	struct hsr_sup_tag	hsr_sup;
} __packed;
130
/* HSRv1 supervision frame: unlike v0, an HSR tag precedes the
 * supervision tag.
 */
struct hsrv1_ethhdr_sp {
	struct ethhdr		ethhdr;
	struct hsr_tag		hsr;
	struct hsr_sup_tag	hsr_sup;
} __packed;
136
/* Role of a net_device within an HSR instance. */
enum hsr_port_type {
	HSR_PT_NONE = 0,	/* Must be 0, used by framereg */
	HSR_PT_SLAVE_A,
	HSR_PT_SLAVE_B,
	HSR_PT_INTERLINK,
	HSR_PT_MASTER,
	HSR_PT_PORTS,	/* This must be the last item in the enum */
};
Arvid Brodinc5a75912014-07-04 23:38:05 +0200145
/* One port (net_device) participating in an HSR instance. */
struct hsr_port {
	struct list_head	port_list;	/* Linked into hsr_priv.ports */
	struct net_device	*dev;
	struct hsr_priv		*hsr;		/* Back-pointer to the owning instance */
	enum hsr_port_type	type;
};
Arvid Brodinf4214362013-10-30 21:10:47 +0100152
/* Per-instance private state of an HSR device. */
struct hsr_priv {
	struct rcu_head		rcu_head;
	struct list_head	ports;		/* hsr_port list; walk with hsr_for_each_port() */
	struct list_head	node_db;	/* Known HSR nodes */
	struct list_head	self_node_db;	/* MACs of slaves */
	struct timer_list	announce_timer;	/* Supervision frame dispatch */
	struct timer_list	prune_timer;	/* Periodic node_db pruning; see PRUNE_PERIOD */
	int announce_count;
	u16 sequence_nr;
	u16 sup_sequence_nr;	/* For HSRv1 separate seq_nr for supervision */
	u8 prot_version;		/* Indicate if HSRv0 or HSRv1. */
	spinlock_t seqnr_lock;	/* locking for sequence_nr */
	unsigned char		sup_multicast_addr[ETH_ALEN];
#ifdef CONFIG_DEBUG_FS
	struct dentry *node_tbl_root;	/* debugfs directory for this instance */
	struct dentry *node_tbl_file;	/* debugfs node-table dump file */
#endif
};
171
/* Iterate over all ports of @hsr; traversal uses list_for_each_entry_rcu,
 * so callers are expected to be in an RCU read-side critical section.
 */
#define hsr_for_each_port(hsr, port) \
	list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
174
/* Find the port of @hsr with type @pt; presumably returns NULL when no such
 * port exists — implemented elsewhere, confirm against the definition.
 */
struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
Arvid Brodinf4214362013-10-30 21:10:47 +0100176
Arvid Brodinf266a682014-07-04 23:41:03 +0200177/* Caller must ensure skb is a valid HSR frame */
178static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
179{
180 struct hsr_ethhdr *hsr_ethhdr;
181
Murali Karicheri5fa96772019-04-05 13:31:29 -0400182 hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
Arvid Brodinf266a682014-07-04 23:41:03 +0200183 return ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
184}
185
/* debugfs support is optional: real implementations when DEBUG_FS is
 * enabled, empty inline stubs otherwise so callers need no #ifdefs.
 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
void hsr_debugfs_term(struct hsr_priv *priv);
#else
static inline void hsr_debugfs_init(struct hsr_priv *priv,
				    struct net_device *hsr_dev)
{}
static inline void hsr_debugfs_term(struct hsr_priv *priv)
{}
#endif
196
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200197#endif /* __HSR_PRIVATE_H */