/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>

/**
 * switchdev_trans_item_enqueue - Enqueue data item to transaction queue
 *
 * @trans: transaction
 * @data: pointer to data being queued
 * @destructor: data destructor
 * @tritem: transaction item being queued
 *
 * Enqueue data item to transaction queue. tritem is typically placed in
 * a container pointed at by the data pointer. The destructor is called on
 * transaction abort and after a successful commit phase in case
 * the caller did not dequeue the item before.
 */
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
				  void *data, void (*destructor)(void const *),
				  struct switchdev_trans_item *tritem)
{
	tritem->data = data;
	tritem->destructor = destructor;
	list_add_tail(&tritem->list, &trans->item_list);
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
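
/* Usage sketch (hypothetical driver code, for illustration): memory
 * allocated in the prepare phase can be queued on the transaction so
 * it survives into the commit phase, and is freed by the destructor
 * if the transaction aborts. Here foo_ctx is an assumed driver struct
 * embedding a switchdev_trans_item named tritem:
 *
 *	struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	switchdev_trans_item_enqueue(trans, ctx, kfree, &ctx->tritem);
 *
 * The commit phase later retrieves the same pointer with
 * switchdev_trans_item_dequeue(trans).
 */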

static struct switchdev_trans_item *
__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	if (list_empty(&trans->item_list))
		return NULL;
	tritem = list_first_entry(&trans->item_list,
				  struct switchdev_trans_item, list);
	list_del(&tritem->list);
	return tritem;
}

/**
 * switchdev_trans_item_dequeue - Dequeue data item from transaction queue
 *
 * @trans: transaction
 */
void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	tritem = __switchdev_trans_item_dequeue(trans);
	BUG_ON(!tritem);
	return tritem->data;
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);

static void switchdev_trans_init(struct switchdev_trans *trans)
{
	INIT_LIST_HEAD(&trans->item_list);
}

static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	while ((tritem = __switchdev_trans_item_dequeue(trans)))
		tritem->destructor(tritem->data);
}

static void switchdev_trans_items_warn_destroy(struct net_device *dev,
					       struct switchdev_trans *trans)
{
	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
	     dev->name);
	switchdev_trans_items_destroy(trans);
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[0];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in the deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

/**
 * switchdev_port_attr_get - Get port attribute
 *
 * @dev: port device
 * @attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	struct switchdev_attr first = {
		.id = SWITCHDEV_ATTR_ID_UNDEFINED
	};
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_get)
		return ops->switchdev_port_attr_get(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to get attr on
	 * each port. Return -ENODATA if attr values don't
	 * compare across ports.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_attr_get(lower_dev, attr);
		if (err)
			break;
		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
			first = *attr;
		else if (memcmp(&first, attr, sizeof(*attr)))
			return -ENODATA;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
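
/* Usage sketch: the most common getter is the switch parent ID, used
 * to tell whether two ports sit on the same physical switch:
 *
 *	struct switchdev_attr attr = {
 *		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 *	};
 *	int err = switchdev_port_attr_get(dev, &attr);
 *
 * On success, attr.u.ppid holds the device's parent (switch) ID.
 */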

static int __switchdev_port_attr_set(struct net_device *dev,
				     const struct switchdev_attr *attr,
				     struct switchdev_trans *trans)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_set)
		return ops->switchdev_port_attr_set(dev, attr, trans);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		goto done;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to set attr on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_attr_set(lower_dev, attr, trans);
		if (err == -EOPNOTSUPP &&
		    attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
			continue;
		if (err)
			break;
	}

done:
	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
		err = 0;

	return err;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	switchdev_trans_init(&trans);

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction. Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit attr set. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
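
/* Usage sketch (illustrative): setting a bridge port's STP state from
 * a context that cannot sleep, using the deferred variant:
 *
 *	struct switchdev_attr attr = {
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(dev, &attr);
 *
 * With SWITCHDEV_F_DEFER set, the attr is copied and applied later
 * under rtnl_lock by the deferred ops worker.
 */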

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		return sizeof(struct switchdev_obj_ipv4_fib);
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		return sizeof(struct switchdev_obj_port_fdb);
	default:
		BUG();
	}
	return 0;
}

static int __switchdev_port_obj_add(struct net_device *dev,
				    const struct switchdev_obj *obj,
				    struct switchdev_trans *trans)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_add)
		return ops->switchdev_port_obj_add(dev, obj, trans);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to add object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_obj_add(lower_dev, obj, trans);
		if (err)
			break;
	}

	return err;
}

static int switchdev_port_obj_add_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	switchdev_trans_init(&trans);

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_obj_add(dev, obj, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction. Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit obj add. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_obj_add(dev, obj, &trans);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_add_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_add_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
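
/* Usage sketch: adding a VLAN range to a port, much as the bridge
 * AF_SPEC handling below does (the VID values are arbitrary examples):
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid_begin = 10,
 *		.vid_end = 20,
 *	};
 *
 *	err = switchdev_port_obj_add(dev, &vlan.obj);
 */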

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_del)
		return ops->switchdev_port_obj_del(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to delete object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_del_now(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 * switchdev_port_obj_dump - Dump port objects
 *
 * @dev: port device
 * @obj: object to dump
 * @cb: function to call with a filled object
 *
 * rtnl_lock must be held.
 */
int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
			    switchdev_obj_dump_cb_t *cb)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	ASSERT_RTNL();

	if (ops && ops->switchdev_port_obj_dump)
		return ops->switchdev_port_obj_dump(dev, obj, cb);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to dump objects on
	 * first port at bottom of stack.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_dump(lower_dev, obj, cb);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);

static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier. This should be used by code
 * which needs to monitor events happening in particular device.
 * Return values are same as for atomic_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 * Return values are same as for atomic_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 *
 * Call all network notifier blocks. This should be called by driver
 * when it needs to propagate hardware event.
 * Return values are same as for atomic_notifier_call_chain().
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info)
{
	int err;

	info->dev = dev;
	mutex_lock(&switchdev_mutex);
	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
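
/* Usage sketch (driver side, illustrative): a driver that learns a MAC
 * address in hardware can notify listeners (e.g. the bridge) like so,
 * where addr and vid stand for the learned MAC/VLAN pair:
 *
 *	struct switchdev_notifier_fdb_info info;
 *
 *	info.addr = addr;
 *	info.vid = vid;
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD, dev, &info.info);
 */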

struct switchdev_vlan_dump {
	struct switchdev_obj_port_vlan vlan;
	struct sk_buff *skb;
	u32 filter_mask;
	u16 flags;
	u16 begin;
	u16 end;
};

static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
{
	struct bridge_vlan_info vinfo;

	vinfo.flags = dump->flags;

	if (dump->begin == 0 && dump->end == 0) {
		return 0;
	} else if (dump->begin == dump->end) {
		vinfo.vid = dump->begin;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	} else {
		vinfo.vid = dump->begin;
		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
		vinfo.vid = dump->end;
		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	}

	return 0;
}

static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct switchdev_vlan_dump *dump =
		container_of(vlan, struct switchdev_vlan_dump, vlan);
	int err = 0;

	if (vlan->vid_begin > vlan->vid_end)
		return -EINVAL;

	if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
		dump->flags = vlan->flags;
		for (dump->begin = dump->end = vlan->vid_begin;
		     dump->begin <= vlan->vid_end;
		     dump->begin++, dump->end++) {
			err = switchdev_port_vlan_dump_put(dump);
			if (err)
				return err;
		}
	} else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
		if (dump->begin > vlan->vid_begin &&
		    dump->begin >= vlan->vid_end) {
			if ((dump->begin - 1) == vlan->vid_end &&
			    dump->flags == vlan->flags) {
				/* prepend */
				dump->begin = vlan->vid_begin;
			} else {
				err = switchdev_port_vlan_dump_put(dump);
				dump->flags = vlan->flags;
				dump->begin = vlan->vid_begin;
				dump->end = vlan->vid_end;
			}
		} else if (dump->end <= vlan->vid_begin &&
			   dump->end < vlan->vid_end) {
			if ((dump->end + 1) == vlan->vid_begin &&
			    dump->flags == vlan->flags) {
				/* append */
				dump->end = vlan->vid_end;
			} else {
				err = switchdev_port_vlan_dump_put(dump);
				dump->flags = vlan->flags;
				dump->begin = vlan->vid_begin;
				dump->end = vlan->vid_end;
			}
		} else {
			err = -EINVAL;
		}
	}

	return err;
}

static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
				    u32 filter_mask)
{
	struct switchdev_vlan_dump dump = {
		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.skb = skb,
		.filter_mask = filter_mask,
	};
	int err = 0;

	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
					      switchdev_port_vlan_dump_cb);
		if (err)
			goto err_out;
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			/* last one */
			err = switchdev_port_vlan_dump_put(&dump);
	}

err_out:
	return err == -EOPNOTSUPP ? 0 : err;
}

/**
 * switchdev_port_bridge_getlink - Get bridge port attributes
 *
 * @dev: port device
 *
 * Called for SELF on rtnl_bridge_getlink to get bridge port
 * attributes.
 */
int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
	};
	u16 mode = BRIDGE_MODE_UNDEF;
	u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
				       attr.u.brport_flags, mask, nlflags,
				       filter_mask, switchdev_port_vlan_fill);
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);

static int switchdev_port_br_setflag(struct net_device *dev,
				     struct nlattr *nlattr,
				     unsigned long brport_flag)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
	};
	u8 flag = nla_get_u8(nlattr);
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	if (flag)
		attr.u.brport_flags |= brport_flag;
	else
		attr.u.brport_flags &= ~brport_flag;

	return switchdev_port_attr_set(dev, &attr);
}

static const struct nla_policy
switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
};

static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
					      struct nlattr *protinfo)
{
	struct nlattr *attr;
	int rem;
	int err;

	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
				  switchdev_port_bridge_policy);
	if (err)
		return err;

	nla_for_each_nested(attr, protinfo, rem) {
		switch (nla_type(attr)) {
		case IFLA_BRPORT_LEARNING:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING);
			break;
		case IFLA_BRPORT_LEARNING_SYNC:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING_SYNC);
			break;
		case IFLA_BRPORT_UNICAST_FLOOD:
			err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
			break;
		default:
			err = -EOPNOTSUPP;
			break;
		}
		if (err)
			return err;
	}

	return 0;
}

static int switchdev_port_br_afspec(struct net_device *dev,
				    struct nlattr *afspec,
				    int (*f)(struct net_device *dev,
					     const struct switchdev_obj *obj))
{
	struct nlattr *attr;
	struct bridge_vlan_info *vinfo;
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
	};
	int rem;
	int err;

	nla_for_each_nested(attr, afspec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;
		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;
		vinfo = nla_data(attr);
		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
			return -EINVAL;
		vlan.flags = vinfo->flags;
		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			if (vlan.vid_begin)
				return -EINVAL;
			vlan.vid_begin = vinfo->vid;
			/* don't allow range of pvids */
			if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
				return -EINVAL;
		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			if (!vlan.vid_begin)
				return -EINVAL;
			vlan.vid_end = vinfo->vid;
			if (vlan.vid_end <= vlan.vid_begin)
				return -EINVAL;
			err = f(dev, &vlan.obj);
			if (err)
				return err;
			vlan.vid_begin = 0;
		} else {
			if (vlan.vid_begin)
				return -EINVAL;
			vlan.vid_begin = vinfo->vid;
			vlan.vid_end = vinfo->vid;
			err = f(dev, &vlan.obj);
			if (err)
				return err;
			vlan.vid_begin = 0;
		}
	}

	return 0;
}

/**
 * switchdev_port_bridge_setlink - Set bridge port attributes
 *
 * @dev: port device
 * @nlh: netlink header
 * @flags: netlink flags
 *
 * Called for SELF on rtnl_bridge_setlink to set bridge port
 * attributes.
 */
int switchdev_port_bridge_setlink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				   IFLA_PROTINFO);
	if (protinfo) {
		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
		if (err)
			return err;
	}

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		err = switchdev_port_br_afspec(dev, afspec,
					       switchdev_port_obj_add);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);

/**
 * switchdev_port_bridge_dellink - Delete bridge port attributes
 *
 * @dev: port device
 * @nlh: netlink header
 * @flags: netlink flags
 *
 * Called for SELF on rtnl_bridge_dellink to delete bridge port
 * attributes.
 */
int switchdev_port_bridge_dellink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		return switchdev_port_br_afspec(dev, afspec,
						switchdev_port_obj_del);

	return 0;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);

/**
 * switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
 *
 * @ndm: netlink hdr
 * @tb: netlink attributes
 * @dev: port device
 * @addr: MAC address to add
 * @vid: VLAN to add
 * @nlm_flags: netlink flags passed in (NLM_F_*)
 *
 * Add FDB entry to switch device.
 */
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid, u16 nlm_flags)
{
	struct switchdev_obj_port_fdb fdb = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.vid = vid,
	};

	ether_addr_copy(fdb.addr, addr);
	return switchdev_port_obj_add(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);

/**
 * switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
 *
 * @ndm: netlink hdr
 * @tb: netlink attributes
 * @dev: port device
 * @addr: MAC address to delete
 * @vid: VLAN to delete
 *
 * Delete FDB entry from switch device.
 */
int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid)
{
	struct switchdev_obj_port_fdb fdb = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.vid = vid,
	};

	ether_addr_copy(fdb.addr, addr);
	return switchdev_port_obj_del(dev, &fdb.obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);

struct switchdev_fdb_dump {
	struct switchdev_obj_port_fdb fdb;
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
{
	struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
	struct switchdev_fdb_dump *dump =
		container_of(fdb, struct switchdev_fdb_dump, fdb);
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[0])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = fdb->ndm_state;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
		goto nla_put_failure;

	if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

/**
 * switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
 *
 * @skb: netlink skb
 * @cb: netlink callback
 * @dev: port device
 * @filter_dev: filter device
 * @idx: first idx to process
 *
 * Dump FDB entries from switch device.
 */
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev,
			    struct net_device *filter_dev, int idx)
{
	struct switchdev_fdb_dump dump = {
		.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = idx,
	};

	switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);
	return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
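
/* Usage sketch: drivers typically wire these helpers directly into
 * their netdev ops (the rocker driver does this, for example; the
 * foo_port_netdev_ops name is a placeholder):
 *
 *	static const struct net_device_ops foo_port_netdev_ops = {
 *		...
 *		.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
 *		.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
 *		.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 *		.ndo_fdb_add		= switchdev_port_fdb_add,
 *		.ndo_fdb_del		= switchdev_port_fdb_del,
 *		.ndo_fdb_dump		= switchdev_port_fdb_dump,
 *	};
 */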

static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct net_device *port_dev;
	struct list_head *iter;

	/* Recursively search down until we find a sw port dev.
	 * (A sw port dev supports switchdev_port_attr_get).
	 */

	if (ops && ops->switchdev_port_attr_get)
		return dev;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		port_dev = switchdev_get_lowest_dev(lower_dev);
		if (port_dev)
			return port_dev;
	}

	return NULL;
}

static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	struct switchdev_attr prev_attr;
	struct net_device *dev = NULL;
	int nhsel;

	ASSERT_RTNL();

	/* For this route, all nexthop devs must be on the same switch. */

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			return NULL;

		dev = switchdev_get_lowest_dev(nh->nh_dev);
		if (!dev)
			return NULL;

		if (switchdev_port_attr_get(dev, &attr))
			return NULL;

		if (nhsel > 0 &&
		    !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
			return NULL;

		prev_attr = attr;
	}

	return dev;
}

/**
 * switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @nlflags: netlink flags passed in (NLM_F_*)
 * @tb_id: route table ID
 *
 * Add/modify switch IPv4 route entry.
 */
int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
{
	struct switchdev_obj_ipv4_fib ipv4_fib = {
		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
		.dst = dst,
		.dst_len = dst_len,
		.tos = tos,
		.type = type,
		.nlflags = nlflags,
		.tb_id = tb_id,
	};
	struct net_device *dev;
	int err = 0;

	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));

	/* Don't offload route if using custom ip rules or if
	 * IPv4 FIB offloading has been disabled completely.
	 */

#ifdef CONFIG_IP_MULTIPLE_TABLES
	if (fi->fib_net->ipv4.fib_has_custom_rules)
		return 0;
#endif

	if (fi->fib_net->ipv4.fib_offload_disabled)
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
	if (!err)
		fi->fib_flags |= RTNH_F_OFFLOAD;

	return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);

/**
 * switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
 *
 * @dst: route's IPv4 destination address
 * @dst_len: destination address length (prefix length)
 * @fi: route FIB info structure
 * @tos: route TOS
 * @type: route type
 * @tb_id: route table ID
 *
 * Delete IPv4 route entry from switch device.
 */
int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 tb_id)
{
	struct switchdev_obj_ipv4_fib ipv4_fib = {
		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
		.dst = dst,
		.dst_len = dst_len,
		.tos = tos,
		.type = type,
		.nlflags = 0,
		.tb_id = tb_id,
	};
	struct net_device *dev;
	int err = 0;

	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));

	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
	if (!err)
		fi->fib_flags &= ~RTNH_F_OFFLOAD;

	return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);

/**
 * switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 * @fi: route FIB info structure
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
	/* There was a problem installing this route to the offload
	 * device. For now, until we come up with more refined
	 * policy handling, abruptly end IPv4 fib offloading for
	 * the entire net by flushing offload device(s) of all
	 * IPv4 routes, and mark IPv4 fib offloading broken from
	 * this point forward.
	 */

	fib_flush_external(fi->fib_net);
	fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
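
/* Usage sketch (simplified from the IPv4 FIB code; key and plen stand
 * for the route's destination and prefix length): the caller is
 * expected to abort offloading when an add fails:
 *
 *	err = switchdev_fib_ipv4_add(key, plen, fi, tos, type,
 *				     nlflags, tb_id);
 *	if (err)
 *		switchdev_fib_ipv4_abort(fi);
 */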

static bool switchdev_port_same_parent_id(struct net_device *a,
					  struct net_device *b)
{
	struct switchdev_attr a_attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};
	struct switchdev_attr b_attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};

	if (switchdev_port_attr_get(a, &a_attr) ||
	    switchdev_port_attr_get(b, &b_attr))
		return false;

	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
}

static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
				       struct net_device *group_dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
		if (lower_dev == dev)
			continue;
		if (switchdev_port_same_parent_id(dev, lower_dev))
			return lower_dev->offload_fwd_mark;
		return switchdev_port_fwd_mark_get(dev, lower_dev);
	}

	return dev->ifindex;
}

static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
					  u32 old_mark, u32 *reset_mark)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
		if (lower_dev->offload_fwd_mark == old_mark) {
			if (!*reset_mark)
				*reset_mark = lower_dev->ifindex;
			lower_dev->offload_fwd_mark = *reset_mark;
		}
		switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
	}
}

/**
 * switchdev_port_fwd_mark_set - Set port offload forwarding mark
 *
 * @dev: port device
 * @group_dev: containing device
 * @joining: true if dev is joining group; false if leaving group
 *
 * An ungrouped port's offload mark is just its ifindex. A grouped
 * port's (member of a bridge, for example) offload mark is the ifindex
 * of one of the ports in the group with the same parent (switch) ID.
 * Ports on the same device in the same group will have the same mark.
 *
 * Example:
 *
 *		br0		ifindex=9
 *		  sw1p1		ifindex=2	mark=2
 *		  sw1p2		ifindex=3	mark=2
 *		  sw2p1		ifindex=4	mark=5
 *		  sw2p2		ifindex=5	mark=5
 *
 * If sw2p2 leaves the bridge, we'll have:
 *
 *		br0		ifindex=9
 *		  sw1p1		ifindex=2	mark=2
 *		  sw1p2		ifindex=3	mark=2
 *		  sw2p1		ifindex=4	mark=4
 *		  sw2p2		ifindex=5	mark=5
 */
void switchdev_port_fwd_mark_set(struct net_device *dev,
				 struct net_device *group_dev,
				 bool joining)
{
	u32 mark = dev->ifindex;
	u32 reset_mark = 0;

	if (group_dev) {
		ASSERT_RTNL();
		if (joining)
			mark = switchdev_port_fwd_mark_get(dev, group_dev);
		else if (dev->offload_fwd_mark == mark)
			/* Ohoh, this port was the mark reference port,
			 * but it's leaving the group, so reset the
			 * mark for the remaining ports in the group.
			 */
			switchdev_port_fwd_mark_reset(group_dev, mark,
						      &reset_mark);
	}

	dev->offload_fwd_mark = mark;
}
EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);