// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame handler and other utility functions for HSR and PRP.
 */

#include "hsr_slave.h"
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_forward.h"
#include "hsr_framereg.h"

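/* Ingress frames on a DAN (Doubly Attached Node) port must carry an HSR
 * tag (ETH_P_HSR) or be PRP/HSR supervision frames (ETH_P_PRP); any other
 * Ethertype is invalid here.
 */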
bool hsr_invalid_dan_ingress_frame(__be16 protocol)
{
	return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR));
}

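/* rx handler for frames arriving on the slave (ring) interfaces; called
 * under rcu_read_lock() from the slave's receive path. Either consumes
 * the frame into the HSR/PRP forwarding code or passes it on to the
 * normal stack.
 */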
static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct hsr_port *port;
	struct hsr_priv *hsr;
	__be16 protocol;

	/* Packets from dev_loopback_xmit() do not have L2 header, bail out */
	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: skb invalid", __func__);
		return RX_HANDLER_PASS;
	}

	port = hsr_port_get_rcu(skb->dev);
	if (!port)
		goto finish_pass;
	hsr = port->hsr;

	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
		/* Directly kill frames sent by ourselves */
		kfree_skb(skb);
		goto finish_consume;
	}

	/* For HSR, only tagged frames are expected (unless the device
	 * offloads HSR tag removal), but for PRP there can also be
	 * non-tagged frames from singly attached nodes (SANs).
	 */
	protocol = eth_hdr(skb)->h_proto;

	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    hsr->proto_ops->invalid_dan_ingress_frame &&
	    hsr->proto_ops->invalid_dan_ingress_frame(protocol))
		goto finish_pass;

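	/* rx handlers are called with skb->data just past the Ethernet
	 * header; push it back on so the forwarding code sees the whole
	 * frame.
	 */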
	skb_push(skb, ETH_HLEN);

	if (skb_mac_header(skb) != skb->data) {
		WARN_ONCE(1, "%s:%d: Malformed frame at source port %s\n",
			  __func__, __LINE__, port->dev->name);
		goto finish_consume;
	}

	hsr_forward_skb(skb, port);

finish_consume:
	return RX_HANDLER_CONSUMED;

finish_pass:
	return RX_HANDLER_PASS;
}

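/* A device is acting as an HSR/PRP slave port if and only if its
 * rx_handler is our hsr_handle_frame.
 */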
bool hsr_port_exists(const struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
}

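/* Validate that a device is suitable for use as an HSR/PRP slave port;
 * the reason for rejection is reported through extack.
 */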
static int hsr_check_dev_ok(struct net_device *dev,
			    struct netlink_ext_ack *extack)
{
	/* Don't allow HSR on loopback or non-Ethernet-like devices */
	if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
	    dev->addr_len != ETH_ALEN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
		return -EINVAL;
	}

	/* Don't allow enslaving HSR devices */
	if (is_hsr_master(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot create trees of HSR devices.");
		return -EINVAL;
	}

	if (hsr_port_exists(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This device is already an HSR slave.");
		return -EINVAL;
	}

	if (is_vlan_dev(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
		return -EINVAL;
	}

	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This device does not support bridging.");
		return -EOPNOTSUPP;
	}

	/* HSR over bonded devices has not been tested, but it is not
	 * rejected here since it may well work.
	 */

	return 0;
}

/* Set up a device to be added to the HSR bridge: put it in promiscuous
 * mode, link it below the HSR master device and register the rx handler.
 */
static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
			     struct hsr_port *port,
			     struct netlink_ext_ack *extack)
{
	struct net_device *hsr_dev;
	struct hsr_port *master;
	int res;

	res = dev_set_promiscuity(dev, 1);
	if (res)
		return res;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	hsr_dev = master->dev;

	res = netdev_upper_dev_link(dev, hsr_dev, extack);
	if (res)
		goto fail_upper_dev_link;

	res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
	if (res)
		goto fail_rx_handler;
	dev_disable_lro(dev);

	return 0;

fail_rx_handler:
	netdev_upper_dev_unlink(dev, hsr_dev);
fail_upper_dev_link:
	dev_set_promiscuity(dev, -1);
	return res;
}

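/* Create an hsr_port of the given type for @dev and attach it to @hsr.
 * Slave devices are validated and set up first; the master device's
 * features and MTU are then recomputed to match the new ring.
 */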
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
		 enum hsr_port_type type, struct netlink_ext_ack *extack)
{
	struct hsr_port *port, *master;
	int res;

	if (type != HSR_PT_MASTER) {
		res = hsr_check_dev_ok(dev, extack);
		if (res)
			return res;
	}

	port = hsr_port_get_hsr(hsr, type);
	if (port)
		return -EBUSY;	/* This port already exists */

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->hsr = hsr;
	port->dev = dev;
	port->type = type;

	if (type != HSR_PT_MASTER) {
		res = hsr_portdev_setup(hsr, dev, port, extack);
		if (res)
			goto fail_dev_setup;
	}

	list_add_tail_rcu(&port->port_list, &hsr->ports);
	synchronize_rcu();

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_update_features(master->dev);
	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));

	return 0;

fail_dev_setup:
	kfree(port);
	return res;
}

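/* Detach and free a port. For slave ports this undoes hsr_portdev_setup()
 * and recomputes the master's features and MTU; RCU readers are flushed
 * with synchronize_rcu() before the port is freed.
 */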
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	list_del_rcu(&port->port_list);

	if (port != master) {
		netdev_update_features(master->dev);
		dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		netdev_rx_handler_unregister(port->dev);
		dev_set_promiscuity(port->dev, -1);
		netdev_upper_dev_unlink(port->dev, master->dev);
	}

	synchronize_rcu();

	kfree(port);
}