// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 * @extack: netlink extended ack, for error message propagation
 *
 * rtnl_lock must be held and the caller must not be in atomic context,
 * unless the SWITCHDEV_F_DEFER flag is set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);

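/*
 * Example (illustrative only, not part of the original file): the bridge
 * layer sets the STP state of an offloaded port roughly like this.
 * Passing SWITCHDEV_F_DEFER makes the operation run later under
 * rtnl_lock, so the call is safe from the bridge's atomic context:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(brport_dev, &attr, NULL);
 */
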
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 * @extack: netlink extended ack
 *
 * rtnl_lock must be held and the caller must not be in atomic context,
 * unless the SWITCHDEV_F_DEFER flag is set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);

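/*
 * Example (illustrative only): installing VLAN 10 as PVID/untagged on a
 * port would look roughly like this; the embedded "obj" member is what
 * actually travels through the notifier chain:
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = brport_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid = 10,
 *	};
 *
 *	err = switchdev_port_obj_add(brport_dev, &vlan.obj, extack);
 */
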
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * rtnl_lock must be held and the caller must not be in atomic context,
 * unless the SWITCHDEV_F_DEFER flag is set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 * @extack: netlink extended ack
 *
 * Call all switch device notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

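/*
 * Example (illustrative only; the "foo" names are hypothetical): a switch
 * driver typically registers an atomic notifier to learn about FDB events
 * and other notifications emitted via call_switchdev_notifiers():
 *
 *	static int foo_switchdev_event(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *			(runs in atomic context; defer the real work)
 *			...
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */
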
int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_add_to_device(struct net_device *dev,
		const struct net_device *orig_dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*add_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_add_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return add_cb(dev, orig_dev, info->ctx, fdb_info);

	if (netif_is_lag_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			goto maybe_bridged_with_us;

		/* This is a LAG interface that we offload */
		if (!lag_add_cb)
			return -EOPNOTSUPP;

		return lag_add_cb(dev, orig_dev, info->ctx, fdb_info);
	}

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge device.
	 */
	if (netif_is_bridge_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			return 0;

		/* This is a bridge interface that we offload */
		netdev_for_each_lower_dev(dev, lower_dev, iter) {
			/* Do not propagate FDB entries across bridges */
			if (netif_is_bridge_master(lower_dev))
				continue;

			/* Bridge ports might be either us, or LAG interfaces
			 * that we offload.
			 */
			if (!check_cb(lower_dev) &&
			    !switchdev_lower_dev_find(lower_dev, check_cb,
						      foreign_dev_check_cb))
				continue;

			err = __switchdev_handle_fdb_add_to_device(lower_dev, orig_dev,
								   fdb_info, check_cb,
								   foreign_dev_check_cb,
								   add_cb, lag_add_cb);
			if (err && err != -EOPNOTSUPP)
				return err;
		}

		return 0;
	}

maybe_bridged_with_us:
	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
		return 0;

	return __switchdev_handle_fdb_add_to_device(br, orig_dev, fdb_info,
						    check_cb, foreign_dev_check_cb,
						    add_cb, lag_add_cb);
}

int switchdev_handle_fdb_add_to_device(struct net_device *dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*add_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_add_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_add_to_device(dev, dev, fdb_info,
						   check_cb,
						   foreign_dev_check_cb,
						   add_cb, lag_add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_add_to_device);

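/*
 * Example (illustrative only; "foo" callbacks are hypothetical): a driver
 * would typically dispatch to this helper from its atomic switchdev
 * notifier, letting it walk bridge and LAG uppers/lowers on its behalf.
 * A NULL lag_add_cb is acceptable if the driver does not offload LAGs:
 *
 *	case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		err = switchdev_handle_fdb_add_to_device(dev, ptr,
 *							 foo_dev_check,
 *							 foo_foreign_dev_check,
 *							 foo_fdb_add, NULL);
 *		return notifier_from_errno(err);
 */
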
static int __switchdev_handle_fdb_del_to_device(struct net_device *dev,
		const struct net_device *orig_dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*del_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_del_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return del_cb(dev, orig_dev, info->ctx, fdb_info);

	if (netif_is_lag_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			goto maybe_bridged_with_us;

		/* This is a LAG interface that we offload */
		if (!lag_del_cb)
			return -EOPNOTSUPP;

		return lag_del_cb(dev, orig_dev, info->ctx, fdb_info);
	}

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge device.
	 */
	if (netif_is_bridge_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			return 0;

		/* This is a bridge interface that we offload */
		netdev_for_each_lower_dev(dev, lower_dev, iter) {
			/* Do not propagate FDB entries across bridges */
			if (netif_is_bridge_master(lower_dev))
				continue;

			/* Bridge ports might be either us, or LAG interfaces
			 * that we offload.
			 */
			if (!check_cb(lower_dev) &&
			    !switchdev_lower_dev_find(lower_dev, check_cb,
						      foreign_dev_check_cb))
				continue;

			err = __switchdev_handle_fdb_del_to_device(lower_dev, orig_dev,
								   fdb_info, check_cb,
								   foreign_dev_check_cb,
								   del_cb, lag_del_cb);
			if (err && err != -EOPNOTSUPP)
				return err;
		}

		return 0;
	}

maybe_bridged_with_us:
	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
		return 0;

	return __switchdev_handle_fdb_del_to_device(br, orig_dev, fdb_info,
						    check_cb, foreign_dev_check_cb,
						    del_cb, lag_del_cb);
}

int switchdev_handle_fdb_del_to_device(struct net_device *dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*del_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_del_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_del_to_device(dev, dev, fdb_info,
						   check_cb,
						   foreign_dev_check_cb,
						   del_cb, lag_del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_del_to_device);

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them,
	 * but propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);

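/*
 * Example (illustrative only; "foo" names are hypothetical): the object
 * add/del helpers are meant to be called from a driver's blocking
 * switchdev notifier, which converts the result back into notifier
 * semantics:
 *
 *	static int foo_switchdev_blocking_event(struct notifier_block *nb,
 *						unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_PORT_OBJ_ADD:
 *			err = switchdev_handle_port_obj_add(dev, ptr,
 *							    foo_dev_check,
 *							    foo_port_obj_add);
 *			return notifier_from_errno(err);
 *		case SWITCHDEV_PORT_OBJ_DEL:
 *			err = switchdev_handle_port_obj_del(dev, ptr,
 *							    foo_dev_check,
 *							    foo_port_obj_del);
 *			return notifier_from_errno(err);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */
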
static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them,
	 * but propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them,
	 * but propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);

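/*
 * Example (illustrative only; "foo" names are hypothetical): the set_cb
 * passed to switchdev_handle_port_attr_set() dispatches on the attribute
 * id and returns -EOPNOTSUPP for anything it does not implement, so the
 * event is left unhandled rather than reported as an error:
 *
 *	static int foo_port_attr_set(struct net_device *dev, const void *ctx,
 *				     const struct switchdev_attr *attr,
 *				     struct netlink_ext_ack *extack)
 *	{
 *		switch (attr->id) {
 *		case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
 *			return foo_set_stp_state(dev, attr->u.stp_state);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
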
int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);
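
/*
 * Example (illustrative only; names are hypothetical): a driver calls
 * switchdev_bridge_port_offload() when one of its ports becomes a bridge
 * port, e.g. from a NETDEV_CHANGEUPPER handler, and the matching
 * unoffload when the port leaves the bridge:
 *
 *	err = switchdev_bridge_port_offload(brport_dev, dev, priv,
 *					    &foo_switchdev_nb,
 *					    &foo_switchdev_blocking_nb,
 *					    false, extack);
 *	if (err)
 *		return err;
 *	...
 *	switchdev_bridge_port_unoffload(brport_dev, priv,
 *					&foo_switchdev_nb,
 *					&foo_switchdev_blocking_nb);
 */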