/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

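/* Build the Tx header prepended to packets handed to the device. The driver
 * transmits them as Ethernet control packets on the control TClass, directed
 * to the local port given in tx_info.
 */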
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}

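/* Check whether the port is bound to any SPAN entry as an egress-mirrored
 * port. Such ports need their mirroring shared buffer sized to the MTU.
 */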
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

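/* Size (in cells) of the shared buffer reserved on a port for egress
 * mirroring, derived from the port MTU.
 */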
static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

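/* Start mirroring traffic of the given type from port 'from' to port 'to'
 * by binding 'from' as an inspected port of the SPAN entry of 'to'.
 */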
483static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
484 struct mlxsw_sp_port *to,
485 enum mlxsw_sp_span_type type)
486{
487 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
488 struct mlxsw_sp_span_entry *span_entry;
489 int err;
490
491 span_entry = mlxsw_sp_span_entry_get(to);
492 if (!span_entry)
493 return -ENOENT;
494
495 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
496 span_entry->id);
497
498 err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
499 if (err)
500 goto err_port_bind;
501
502 return 0;
503
504err_port_bind:
505 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
506 return err;
507}
508
509static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
510 struct mlxsw_sp_port *to,
511 enum mlxsw_sp_span_type type)
512{
513 struct mlxsw_sp_span_entry *span_entry;
514
515 span_entry = mlxsw_sp_span_entry_find(to);
516 if (!span_entry) {
517 netdev_err(from->dev, "no span entry found\n");
518 return;
519 }
520
521 netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
522 span_entry->id);
523 mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
524}
525
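/* Enable or disable packet sampling on the port at the given rate through
 * the MPSC register.
 */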
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
			      vid_end, learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						learn_enable);
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}

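/* Program the port's headroom (PG) buffers according to the MTU, the
 * priority to TC mapping and the PAUSE/PFC configuration.
 */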
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					    void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

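/* Periodic work item that refreshes the cached HW statistics of the port
 * while its carrier is up.
 */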
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

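/* Transition the port to Virtual mode: map each active VLAN to its FID using
 * an explicit {Port, VID} to FID mapping and then enable Virtual Port mode.
 */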
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

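/* Transition the port back to VLAN mode: disable Virtual Port mode and
 * remove the per-{Port, VID} to FID mappings of the active VLANs.
 */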
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

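/* Create a vPort - a per-VLAN virtual port object - on top of the physical
 * port and add it to the port's vPorts list.
 */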
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	to_port = mlxsw_sp->ports[mirror->to_local_port];
	span_type = mirror->ingress ?
		    MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = cls->cookie;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

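/* ndo_setup_tc entry point: dispatch matchall (mirror/sample) and flower
 * classifier offload commands to their handlers.
 */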
static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	switch (tc->type) {
	case TC_SETUP_MATCHALL:
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
						       proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
						tc->cls_flower);
			return 0;
		case TC_CLSFLOWER_STATS:
			return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
						     tc->cls_flower);
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

Ido Schimmel9f7ec052016-04-06 17:10:14 +02001488static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1489 struct ethtool_pauseparam *pause)
1490{
1491 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1492
1493 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1494 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1495}
1496
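/* Write the PFCC register to configure global PAUSE admission; pprx and
 * pptx are programmed from the requested rx_pause and tx_pause flags.
 */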
1497static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1498 struct ethtool_pauseparam *pause)
1499{
1500 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1501
1502 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1503 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1504 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1505
1506 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1507 pfcc_pl);
1508}
1509
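/* ethtool -A handler. PAUSE cannot be enabled while PFC is active and
 * PAUSE autonegotiation is not supported. The port headroom is resized
 * before the PAUSE configuration is written; if that write fails, the
 * headroom is rolled back to match the previous state.
 */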
1510static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1511 struct ethtool_pauseparam *pause)
1512{
1513 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1514 bool pause_en = pause->tx_pause || pause->rx_pause;
1515 int err;
1516
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001517 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1518 netdev_err(dev, "PFC already enabled on port\n");
1519 return -EINVAL;
1520 }
1521
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001522 if (pause->autoneg) {
1523 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1524 return -EINVAL;
1525 }
1526
1527 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1528 if (err) {
1529 netdev_err(dev, "Failed to configure port's headroom\n");
1530 return err;
1531 }
1532
1533 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1534 if (err) {
1535 netdev_err(dev, "Failed to set PAUSE parameters\n");
1536 goto err_port_pause_configure;
1537 }
1538
1539 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1540 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1541
1542 return 0;
1543
1544err_port_pause_configure:
1545 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1546 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1547 return err;
1548}
1549
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001550struct mlxsw_sp_port_hw_stats {
1551 char str[ETH_GSTRING_LEN];
Jiri Pirko412791d2016-10-21 16:07:19 +02001552 u64 (*getter)(const char *payload);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001553};
1554
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001555static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001556 {
1557 .str = "a_frames_transmitted_ok",
1558 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1559 },
1560 {
1561 .str = "a_frames_received_ok",
1562 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1563 },
1564 {
1565 .str = "a_frame_check_sequence_errors",
1566 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1567 },
1568 {
1569 .str = "a_alignment_errors",
1570 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1571 },
1572 {
1573 .str = "a_octets_transmitted_ok",
1574 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1575 },
1576 {
1577 .str = "a_octets_received_ok",
1578 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1579 },
1580 {
1581 .str = "a_multicast_frames_xmitted_ok",
1582 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1583 },
1584 {
1585 .str = "a_broadcast_frames_xmitted_ok",
1586 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1587 },
1588 {
1589 .str = "a_multicast_frames_received_ok",
1590 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1591 },
1592 {
1593 .str = "a_broadcast_frames_received_ok",
1594 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1595 },
1596 {
1597 .str = "a_in_range_length_errors",
1598 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1599 },
1600 {
1601 .str = "a_out_of_range_length_field",
1602 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1603 },
1604 {
1605 .str = "a_frame_too_long_errors",
1606 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1607 },
1608 {
1609 .str = "a_symbol_error_during_carrier",
1610 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1611 },
1612 {
1613 .str = "a_mac_control_frames_transmitted",
1614 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1615 },
1616 {
1617 .str = "a_mac_control_frames_received",
1618 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1619 },
1620 {
1621 .str = "a_unsupported_opcodes_received",
1622 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1623 },
1624 {
1625 .str = "a_pause_mac_ctrl_frames_received",
1626 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1627 },
1628 {
1629 .str = "a_pause_mac_ctrl_frames_xmitted",
1630 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1631 },
1632};
1633
1634#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1635
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001636static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1637 {
1638 .str = "rx_octets_prio",
1639 .getter = mlxsw_reg_ppcnt_rx_octets_get,
1640 },
1641 {
1642 .str = "rx_frames_prio",
1643 .getter = mlxsw_reg_ppcnt_rx_frames_get,
1644 },
1645 {
1646 .str = "tx_octets_prio",
1647 .getter = mlxsw_reg_ppcnt_tx_octets_get,
1648 },
1649 {
1650 .str = "tx_frames_prio",
1651 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1652 },
1653 {
1654 .str = "rx_pause_prio",
1655 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1656 },
1657 {
1658 .str = "rx_pause_duration_prio",
1659 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1660 },
1661 {
1662 .str = "tx_pause_prio",
1663 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1664 },
1665 {
1666 .str = "tx_pause_duration_prio",
1667 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1668 },
1669};
1670
1671#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1672
Jiri Pirko412791d2016-10-21 16:07:19 +02001673static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001674{
1675 u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1676
1677 return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
1678}
1679
1680static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1681 {
1682 .str = "tc_transmit_queue_tc",
1683 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
1684 },
1685 {
1686 .str = "tc_no_buffer_discard_uc_tc",
1687 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1688 },
1689};
1690
1691#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1692
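/* Total number of ethtool counters exposed per port: the IEEE 802.3 group
 * plus the per-priority and per-TC groups, the latter two replicated for
 * each of the IEEE_8021QAZ_MAX_TCS priorities / traffic classes.
 */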
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001693#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001694 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1695 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001696 IEEE_8021QAZ_MAX_TCS)
1697
1698static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1699{
1700 int i;
1701
1702 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1703 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1704 mlxsw_sp_port_hw_prio_stats[i].str, prio);
1705 *p += ETH_GSTRING_LEN;
1706 }
1707}
1708
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001709static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1710{
1711 int i;
1712
1713 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1714 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1715 mlxsw_sp_port_hw_tc_stats[i].str, tc);
1716 *p += ETH_GSTRING_LEN;
1717 }
1718}
1719
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001720static void mlxsw_sp_port_get_strings(struct net_device *dev,
1721 u32 stringset, u8 *data)
1722{
1723 u8 *p = data;
1724 int i;
1725
1726 switch (stringset) {
1727 case ETH_SS_STATS:
1728 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1729 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1730 ETH_GSTRING_LEN);
1731 p += ETH_GSTRING_LEN;
1732 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001733
1734 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1735 mlxsw_sp_port_get_prio_strings(&p, i);
1736
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001737 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1738 mlxsw_sp_port_get_tc_strings(&p, i);
1739
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001740 break;
1741 }
1742}
1743
Ido Schimmel3a66ee32015-11-27 13:45:55 +01001744static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1745 enum ethtool_phys_id_state state)
1746{
1747 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1748 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1749 char mlcr_pl[MLXSW_REG_MLCR_LEN];
1750 bool active;
1751
1752 switch (state) {
1753 case ETHTOOL_ID_ACTIVE:
1754 active = true;
1755 break;
1756 case ETHTOOL_ID_INACTIVE:
1757 active = false;
1758 break;
1759 default:
1760 return -EOPNOTSUPP;
1761 }
1762
1763 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1764 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1765}
1766
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001767static int
1768mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
1769 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
1770{
1771 switch (grp) {
1772 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
1773 *p_hw_stats = mlxsw_sp_port_hw_stats;
1774 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
1775 break;
1776 case MLXSW_REG_PPCNT_PRIO_CNT:
1777 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
1778 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1779 break;
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001780 case MLXSW_REG_PPCNT_TC_CNT:
1781 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
1782 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
1783 break;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001784 default:
1785 WARN_ON(1);
Yotam Gigie915ac62017-01-09 11:25:48 +01001786 return -EOPNOTSUPP;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001787 }
1788 return 0;
1789}
1790
1791static void __mlxsw_sp_port_get_stats(struct net_device *dev,
1792 enum mlxsw_reg_ppcnt_grp grp, int prio,
1793 u64 *data, int data_index)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001794{
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001795 struct mlxsw_sp_port_hw_stats *hw_stats;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001796 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001797 int i, len;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001798 int err;
1799
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001800 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
1801 if (err)
1802 return;
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001803 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001804 for (i = 0; i < len; i++)
Colin Ian Kingfaac0ff2016-09-23 12:02:45 +01001805 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001806}
1807
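/* ethtool -S: counters are filled in the same order in which
 * mlxsw_sp_port_get_strings() emits their names - IEEE 802.3 counters
 * first, then per-priority counters, then per-TC counters.
 */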
1808static void mlxsw_sp_port_get_stats(struct net_device *dev,
1809 struct ethtool_stats *stats, u64 *data)
1810{
1811 int i, data_index = 0;
1812
1813 /* IEEE 802.3 Counters */
1814 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
1815 data, data_index);
1816 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
1817
1818 /* Per-Priority Counters */
1819 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1820 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
1821 data, data_index);
1822 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1823 }
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001824
1825 /* Per-TC Counters */
1826 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1827 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
1828 data, data_index);
1829 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
1830 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001831}
1832
1833static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1834{
1835 switch (sset) {
1836 case ETH_SS_STATS:
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001837 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001838 default:
1839 return -EOPNOTSUPP;
1840 }
1841}
1842
1843struct mlxsw_sp_port_link_mode {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001844 enum ethtool_link_mode_bit_indices mask_ethtool;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001845 u32 mask;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001846 u32 speed;
1847};
1848
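/* Translation table between PTYS register protocol bits and ethtool link
 * modes; several register bits may share a single ethtool mode and speed.
 */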
1849static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1850 {
1851 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001852 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1853 .speed = SPEED_100,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001854 },
1855 {
1856 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1857 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001858 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1859 .speed = SPEED_1000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001860 },
1861 {
1862 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001863 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1864 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001865 },
1866 {
1867 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1868 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001869 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1870 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001871 },
1872 {
1873 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1874 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1875 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1876 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001877 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1878 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001879 },
1880 {
1881 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001882 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
1883 .speed = SPEED_20000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001884 },
1885 {
1886 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001887 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1888 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001889 },
1890 {
1891 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001892 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1893 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001894 },
1895 {
1896 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001897 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1898 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001899 },
1900 {
1901 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001902 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1903 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001904 },
1905 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001906 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
1907 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1908 .speed = SPEED_25000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001909 },
1910 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001911 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
1912 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1913 .speed = SPEED_25000,
1914 },
1915 {
1916 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1917 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1918 .speed = SPEED_25000,
1919 },
1925 {
1926 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
1927 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1928 .speed = SPEED_50000,
1929 },
1930 {
1931 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1932 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1933 .speed = SPEED_50000,
1934 },
1935 {
1936 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
1937 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1938 .speed = SPEED_50000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001939 },
1940 {
1941 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001942 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
1943 .speed = SPEED_56000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001944 },
1945 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001946 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1947 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
1948 .speed = SPEED_56000,
1949 },
1950 {
1951 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1952 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
1953 .speed = SPEED_56000,
1954 },
1955 {
1956 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1957 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
1958 .speed = SPEED_56000,
1959 },
1960 {
1961 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
1962 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1963 .speed = SPEED_100000,
1964 },
1965 {
1966 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
1967 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1968 .speed = SPEED_100000,
1969 },
1970 {
1971 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
1972 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1973 .speed = SPEED_100000,
1974 },
1975 {
1976 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
1977 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1978 .speed = SPEED_100000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001979 },
1980};
1981
1982#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1983
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001984static void
1985mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
1986 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001987{
1988 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1989 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1990 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1991 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1992 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1993 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001994 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001995
1996 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1997 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1998 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1999 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2000 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002001 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002002}
2003
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002004static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002005{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002006 int i;
2007
2008 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2009 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002010 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2011 mode);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002012 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002013}
2014
2015static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002016 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002017{
2018 u32 speed = SPEED_UNKNOWN;
2019 u8 duplex = DUPLEX_UNKNOWN;
2020 int i;
2021
2022 if (!carrier_ok)
2023 goto out;
2024
2025 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2026 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2027 speed = mlxsw_sp_port_link_mode[i].speed;
2028 duplex = DUPLEX_FULL;
2029 break;
2030 }
2031 }
2032out:
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002033 cmd->base.speed = speed;
2034 cmd->base.duplex = duplex;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002035}
2036
2037static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2038{
2039 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2040 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2041 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2042 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2043 return PORT_FIBRE;
2044
2045 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2046 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2047 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2048 return PORT_DA;
2049
2050 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2051 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2052 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2053 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2054 return PORT_NONE;
2055
2056 return PORT_OTHER;
2057}
2058
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002059static u32
2060mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002061{
2062 u32 ptys_proto = 0;
2063 int i;
2064
2065 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002066 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2067 cmd->link_modes.advertising))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002068 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2069 }
2070 return ptys_proto;
2071}
2072
2073static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2074{
2075 u32 ptys_proto = 0;
2076 int i;
2077
2078 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2079 if (speed == mlxsw_sp_port_link_mode[i].speed)
2080 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2081 }
2082 return ptys_proto;
2083}
2084
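/* Build the admin protocol mask used at port creation time: advertise
 * every link mode whose speed does not exceed the maximum derived from
 * the port width (base speed multiplied by the number of lanes).
 */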
Ido Schimmel18f1e702016-02-26 17:32:31 +01002085static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2086{
2087 u32 ptys_proto = 0;
2088 int i;
2089
2090 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2091 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2092 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2093 }
2094 return ptys_proto;
2095}
2096
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002097static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2098 struct ethtool_link_ksettings *cmd)
2099{
2100 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2101 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2102 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2103
2104 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2105 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2106}
2107
2108static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2109 struct ethtool_link_ksettings *cmd)
2110{
2111 if (!autoneg)
2112 return;
2113
2114 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2115 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2116}
2117
2118static void
2119mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2120 struct ethtool_link_ksettings *cmd)
2121{
2122 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2123 return;
2124
2125 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2126 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2127}
2128
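/* ethtool get_link_ksettings: a single PTYS query yields the capability,
 * admin and operational protocol masks, from which the supported,
 * advertised and link-partner modes, the connector type and the current
 * speed/duplex are derived.
 */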
2129static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2130 struct ethtool_link_ksettings *cmd)
2131{
2132 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2133 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2134 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2135 char ptys_pl[MLXSW_REG_PTYS_LEN];
2136 u8 autoneg_status;
2137 bool autoneg;
2138 int err;
2139
2140 autoneg = mlxsw_sp_port->link.autoneg;
Elad Raz401c8b42016-10-28 21:35:52 +02002141 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002142 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2143 if (err)
2144 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002145 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2146 &eth_proto_oper);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002147
2148 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2149
2150 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2151
2152 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2153 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2154 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2155
2156 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2157 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2158 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2159 cmd);
2160
2161 return 0;
2162}
2163
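/* ethtool set_link_ksettings: mask the requested advertisement (or fixed
 * speed) against the port's capabilities, write it through PTYS and, if
 * the interface is running, toggle the admin state so that the new
 * configuration takes effect.
 */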
2164static int
2165mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2166 const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002167{
2168 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2169 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2170 char ptys_pl[MLXSW_REG_PTYS_LEN];
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002171 u32 eth_proto_cap, eth_proto_new;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002172 bool autoneg;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002173 int err;
2174
Elad Raz401c8b42016-10-28 21:35:52 +02002175 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002176 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002177 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002178 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002179 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002180
2181 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2182 eth_proto_new = autoneg ?
2183 mlxsw_sp_to_ptys_advert_link(cmd) :
2184 mlxsw_sp_to_ptys_speed(cmd->base.speed);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002185
2186 eth_proto_new = eth_proto_new & eth_proto_cap;
2187 if (!eth_proto_new) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002188 netdev_err(dev, "No supported speed requested\n");
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002189 return -EINVAL;
2190 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002191
Elad Raz401c8b42016-10-28 21:35:52 +02002192 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2193 eth_proto_new);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002194 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002195 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002196 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002197
Ido Schimmel6277d462016-07-15 11:14:58 +02002198 if (!netif_running(dev))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002199 return 0;
2200
Ido Schimmel0c83f882016-09-12 13:26:23 +02002201 mlxsw_sp_port->link.autoneg = autoneg;
2202
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002203 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2204 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002205
2206 return 0;
2207}
2208
2209static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2210 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2211 .get_link = ethtool_op_get_link,
Ido Schimmel9f7ec052016-04-06 17:10:14 +02002212 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2213 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002214 .get_strings = mlxsw_sp_port_get_strings,
Ido Schimmel3a66ee32015-11-27 13:45:55 +01002215 .set_phys_id = mlxsw_sp_port_set_phys_id,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002216 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2217 .get_sset_count = mlxsw_sp_port_get_sset_count,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002218 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2219 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002220};
2221
Ido Schimmel18f1e702016-02-26 17:32:31 +01002222static int
2223mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2224{
2225 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2226 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2227 char ptys_pl[MLXSW_REG_PTYS_LEN];
2228 u32 eth_proto_admin;
2229
2230 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
Elad Raz401c8b42016-10-28 21:35:52 +02002231 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2232 eth_proto_admin);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002233 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2234}
2235
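/* Configure one element of the port's ETS hierarchy through the QEEC
 * register: link element 'index' under 'next_index' and program its
 * DWRR mode and weight.
 */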
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002236int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2237 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2238 bool dwrr, u8 dwrr_weight)
Ido Schimmel90183b92016-04-06 17:10:08 +02002239{
2240 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2241 char qeec_pl[MLXSW_REG_QEEC_LEN];
2242
2243 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2244 next_index);
2245 mlxsw_reg_qeec_de_set(qeec_pl, true);
2246 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2247 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2248 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2249}
2250
Ido Schimmelcc7cf512016-04-06 17:10:11 +02002251int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2252 enum mlxsw_reg_qeec_hr hr, u8 index,
2253 u8 next_index, u32 maxrate)
Ido Schimmel90183b92016-04-06 17:10:08 +02002254{
2255 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2256 char qeec_pl[MLXSW_REG_QEEC_LEN];
2257
2258 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2259 next_index);
2260 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2261 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2262 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2263}
2264
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002265int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2266 u8 switch_prio, u8 tclass)
Ido Schimmel90183b92016-04-06 17:10:08 +02002267{
2268 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2269 char qtct_pl[MLXSW_REG_QTCT_LEN];
2270
2271 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2272 tclass);
2273 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2274}
2275
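/* Default ETS configuration applied when a port is created: a flat
 * TC -> subgroup -> group hierarchy, all maximum shapers disabled and
 * every switch priority mapped to traffic class 0.
 */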
2276static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2277{
2278 int err, i;
2279
2280	/* Set up the element hierarchy so that each TC is linked to
2281	 * one subgroup, and all subgroups are members of the same group.
2282 */
2283 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2284 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2285 0);
2286 if (err)
2287 return err;
2288 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2289 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2290 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2291 0, false, 0);
2292 if (err)
2293 return err;
2294 }
2295 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2296 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2297 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2298 false, 0);
2299 if (err)
2300 return err;
2301 }
2302
2303	/* Make sure the max shaper is disabled in all hierarchies that
2304 * support it.
2305 */
2306 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2307 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2308 MLXSW_REG_QEEC_MAS_DIS);
2309 if (err)
2310 return err;
2311 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2312 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2313 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2314 i, 0,
2315 MLXSW_REG_QEEC_MAS_DIS);
2316 if (err)
2317 return err;
2318 }
2319 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2320 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2321 MLXSW_REG_QEEC_HIERARCY_TC,
2322 i, i,
2323 MLXSW_REG_QEEC_MAS_DIS);
2324 if (err)
2325 return err;
2326 }
2327
2328 /* Map all priorities to traffic class 0. */
2329 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2330 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2331 if (err)
2332 return err;
2333 }
2334
2335 return 0;
2336}
2337
Ido Schimmel05978482016-08-17 16:39:30 +02002338static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
2339{
2340 mlxsw_sp_port->pvid = 1;
2341
2342 return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
2343}
2344
2345static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
2346{
2347 return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
2348}
2349
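/* Create a single switch port: allocate the netdev and per-port state,
 * program SWID, MAC address, speeds, MTU, buffers, ETS and DCB, create
 * the PVID vPort, and finally register the netdev and kick off the
 * periodic HW statistics cache update.
 */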
Jiri Pirko67963a32016-10-28 21:35:55 +02002350static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2351 bool split, u8 module, u8 width, u8 lane)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002352{
2353 struct mlxsw_sp_port *mlxsw_sp_port;
2354 struct net_device *dev;
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002355 size_t bytes;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002356 int err;
2357
2358 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2359 if (!dev)
2360 return -ENOMEM;
Jiri Pirkof20a91f2016-10-27 15:13:00 +02002361 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002362 mlxsw_sp_port = netdev_priv(dev);
2363 mlxsw_sp_port->dev = dev;
2364 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2365 mlxsw_sp_port->local_port = local_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002366 mlxsw_sp_port->split = split;
Ido Schimmeld664b412016-06-09 09:51:40 +02002367 mlxsw_sp_port->mapping.module = module;
2368 mlxsw_sp_port->mapping.width = width;
2369 mlxsw_sp_port->mapping.lane = lane;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002370 mlxsw_sp_port->link.autoneg = 1;
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002371 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
2372 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
2373 if (!mlxsw_sp_port->active_vlans) {
2374 err = -ENOMEM;
2375 goto err_port_active_vlans_alloc;
2376 }
Elad Razfc1273a2016-01-06 13:01:11 +01002377 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
2378 if (!mlxsw_sp_port->untagged_vlans) {
2379 err = -ENOMEM;
2380 goto err_port_untagged_vlans_alloc;
2381 }
Ido Schimmel7f71eb42015-12-15 16:03:37 +01002382 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02002383 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002384
2385 mlxsw_sp_port->pcpu_stats =
2386 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2387 if (!mlxsw_sp_port->pcpu_stats) {
2388 err = -ENOMEM;
2389 goto err_alloc_stats;
2390 }
2391
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002392 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2393 GFP_KERNEL);
2394 if (!mlxsw_sp_port->sample) {
2395 err = -ENOMEM;
2396 goto err_alloc_sample;
2397 }
2398
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002399 mlxsw_sp_port->hw_stats.cache =
2400 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2401
2402 if (!mlxsw_sp_port->hw_stats.cache) {
2403 err = -ENOMEM;
2404 goto err_alloc_hw_stats;
2405 }
2406 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2407 &update_stats_cache);
2408
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002409 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2410 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2411
Ido Schimmel3247ff22016-09-08 08:16:02 +02002412 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2413 if (err) {
2414 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2415 mlxsw_sp_port->local_port);
2416 goto err_port_swid_set;
2417 }
2418
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002419 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2420 if (err) {
2421 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2422 mlxsw_sp_port->local_port);
2423 goto err_dev_addr_init;
2424 }
2425
2426 netif_carrier_off(dev);
2427
2428 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
Yotam Gigi763b4b72016-07-21 12:03:17 +02002429 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2430 dev->hw_features |= NETIF_F_HW_TC;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002431
Jarod Wilsond894be52016-10-20 13:55:16 -04002432 dev->min_mtu = 0;
2433 dev->max_mtu = ETH_MAX_MTU;
2434
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002435	/* Each packet needs to have a Tx header (metadata) on top of all other
2436 * headers.
2437 */
Yotam Gigifeb7d382016-10-04 09:46:04 +02002438 dev->needed_headroom = MLXSW_TXHDR_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002439
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002440 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2441 if (err) {
2442 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2443 mlxsw_sp_port->local_port);
2444 goto err_port_system_port_mapping_set;
2445 }
2446
Ido Schimmel18f1e702016-02-26 17:32:31 +01002447 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2448 if (err) {
2449 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2450 mlxsw_sp_port->local_port);
2451 goto err_port_speed_by_width_set;
2452 }
2453
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002454 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2455 if (err) {
2456 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2457 mlxsw_sp_port->local_port);
2458 goto err_port_mtu_set;
2459 }
2460
2461 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2462 if (err)
2463 goto err_port_admin_status_set;
2464
2465 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2466 if (err) {
2467 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2468 mlxsw_sp_port->local_port);
2469 goto err_port_buffers_init;
2470 }
2471
Ido Schimmel90183b92016-04-06 17:10:08 +02002472 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2473 if (err) {
2474 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2475 mlxsw_sp_port->local_port);
2476 goto err_port_ets_init;
2477 }
2478
Ido Schimmelf00817d2016-04-06 17:10:09 +02002479 /* ETS and buffers must be initialized before DCB. */
2480 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2481 if (err) {
2482 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2483 mlxsw_sp_port->local_port);
2484 goto err_port_dcb_init;
2485 }
2486
Ido Schimmel05978482016-08-17 16:39:30 +02002487 err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
2488 if (err) {
2489 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
2490 mlxsw_sp_port->local_port);
2491 goto err_port_pvid_vport_create;
2492 }
2493
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002494 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
Ido Schimmel2f258442016-08-17 16:39:31 +02002495 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002496 err = register_netdev(dev);
2497 if (err) {
2498 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2499 mlxsw_sp_port->local_port);
2500 goto err_register_netdev;
2501 }
2502
Elad Razd808c7e2016-10-28 21:35:57 +02002503 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2504 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2505 module);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002506 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002507 return 0;
2508
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002509err_register_netdev:
Ido Schimmel2f258442016-08-17 16:39:31 +02002510 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02002511 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmel05978482016-08-17 16:39:30 +02002512 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2513err_port_pvid_vport_create:
Ido Schimmel4de34eb2016-08-04 17:36:22 +03002514 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02002515err_port_dcb_init:
Ido Schimmel90183b92016-04-06 17:10:08 +02002516err_port_ets_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002517err_port_buffers_init:
2518err_port_admin_status_set:
2519err_port_mtu_set:
Ido Schimmel18f1e702016-02-26 17:32:31 +01002520err_port_speed_by_width_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002521err_port_system_port_mapping_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002522err_dev_addr_init:
Ido Schimmel3247ff22016-09-08 08:16:02 +02002523 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2524err_port_swid_set:
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002525 kfree(mlxsw_sp_port->hw_stats.cache);
2526err_alloc_hw_stats:
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002527 kfree(mlxsw_sp_port->sample);
2528err_alloc_sample:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002529 free_percpu(mlxsw_sp_port->pcpu_stats);
2530err_alloc_stats:
Elad Razfc1273a2016-01-06 13:01:11 +01002531 kfree(mlxsw_sp_port->untagged_vlans);
2532err_port_untagged_vlans_alloc:
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002533 kfree(mlxsw_sp_port->active_vlans);
2534err_port_active_vlans_alloc:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002535 free_netdev(dev);
2536 return err;
2537}
2538
Jiri Pirko67963a32016-10-28 21:35:55 +02002539static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2540 bool split, u8 module, u8 width, u8 lane)
2541{
2542 int err;
2543
2544 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2545 if (err) {
2546 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2547 local_port);
2548 return err;
2549 }
Ido Schimmel9a60c902016-12-16 19:29:03 +01002550 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
Jiri Pirko67963a32016-10-28 21:35:55 +02002551 module, width, lane);
2552 if (err)
2553 goto err_port_create;
2554 return 0;
2555
2556err_port_create:
2557 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2558 return err;
2559}
2560
2561static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002562{
2563 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2564
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002565 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
Jiri Pirko67963a32016-10-28 21:35:55 +02002566 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002567 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
Ido Schimmel2f258442016-08-17 16:39:31 +02002568 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02002569 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmel05978482016-08-17 16:39:30 +02002570 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02002571 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmel3e9b27b2016-02-26 17:32:28 +01002572 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2573 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002574 kfree(mlxsw_sp_port->hw_stats.cache);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002575 kfree(mlxsw_sp_port->sample);
Yotam Gigi136f1442017-01-09 11:25:47 +01002576 free_percpu(mlxsw_sp_port->pcpu_stats);
Elad Razfc1273a2016-01-06 13:01:11 +01002577 kfree(mlxsw_sp_port->untagged_vlans);
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002578 kfree(mlxsw_sp_port->active_vlans);
Ido Schimmel32d863f2016-07-02 11:00:10 +02002579 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002580 free_netdev(mlxsw_sp_port->dev);
2581}
2582
Jiri Pirko67963a32016-10-28 21:35:55 +02002583static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2584{
2585 __mlxsw_sp_port_remove(mlxsw_sp, local_port);
2586 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2587}
2588
Jiri Pirkof83e2102016-10-28 21:35:49 +02002589static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2590{
2591 return mlxsw_sp->ports[local_port] != NULL;
2592}
2593
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002594static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2595{
2596 int i;
2597
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002598 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002599 if (mlxsw_sp_port_created(mlxsw_sp, i))
2600 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002601 kfree(mlxsw_sp->port_to_module);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002602 kfree(mlxsw_sp->ports);
2603}
2604
2605static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2606{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002607 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
Ido Schimmeld664b412016-06-09 09:51:40 +02002608 u8 module, width, lane;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002609 size_t alloc_size;
2610 int i;
2611 int err;
2612
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002613 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002614 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2615 if (!mlxsw_sp->ports)
2616 return -ENOMEM;
2617
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002618 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
2619 if (!mlxsw_sp->port_to_module) {
2620 err = -ENOMEM;
2621 goto err_port_to_module_alloc;
2622 }
2623
2624 for (i = 1; i < max_ports; i++) {
Ido Schimmel558c2d52016-02-26 17:32:29 +01002625 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
Ido Schimmeld664b412016-06-09 09:51:40 +02002626 &width, &lane);
Ido Schimmel558c2d52016-02-26 17:32:29 +01002627 if (err)
2628 goto err_port_module_info_get;
2629 if (!width)
2630 continue;
2631 mlxsw_sp->port_to_module[i] = module;
Jiri Pirko67963a32016-10-28 21:35:55 +02002632 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
2633 module, width, lane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002634 if (err)
2635 goto err_port_create;
2636 }
2637 return 0;
2638
2639err_port_create:
Ido Schimmel558c2d52016-02-26 17:32:29 +01002640err_port_module_info_get:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002641 for (i--; i >= 1; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002642 if (mlxsw_sp_port_created(mlxsw_sp, i))
2643 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002644 kfree(mlxsw_sp->port_to_module);
2645err_port_to_module_alloc:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002646 kfree(mlxsw_sp->ports);
2647 return err;
2648}
2649
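/* Ports that may be split by four live in clusters of
 * MLXSW_SP_PORTS_PER_CLUSTER_MAX consecutive local ports; return the
 * first local port of the cluster containing 'local_port'.
 */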
Ido Schimmel18f1e702016-02-26 17:32:31 +01002650static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2651{
2652 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2653
2654 return local_port - offset;
2655}
2656
Ido Schimmelbe945352016-06-09 09:51:39 +02002657static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2658 u8 module, unsigned int count)
2659{
2660 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
2661 int err, i;
2662
2663 for (i = 0; i < count; i++) {
2664 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
2665 width, i * width);
2666 if (err)
2667 goto err_port_module_map;
2668 }
2669
2670 for (i = 0; i < count; i++) {
2671 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
2672 if (err)
2673 goto err_port_swid_set;
2674 }
2675
2676 for (i = 0; i < count; i++) {
2677 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
Ido Schimmeld664b412016-06-09 09:51:40 +02002678 module, width, i * width);
Ido Schimmelbe945352016-06-09 09:51:39 +02002679 if (err)
2680 goto err_port_create;
2681 }
2682
2683 return 0;
2684
2685err_port_create:
2686 for (i--; i >= 0; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002687 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2688 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmelbe945352016-06-09 09:51:39 +02002689 i = count;
2690err_port_swid_set:
2691 for (i--; i >= 0; i--)
2692 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
2693 MLXSW_PORT_SWID_DISABLED_PORT);
2694 i = count;
2695err_port_module_map:
2696 for (i--; i >= 0; i--)
2697 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
2698 return err;
2699}
2700
2701static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2702 u8 base_port, unsigned int count)
2703{
2704 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
2705 int i;
2706
2707 /* Split by four means we need to re-create two ports, otherwise
2708 * only one.
2709 */
2710 count = count / 2;
2711
2712 for (i = 0; i < count; i++) {
2713 local_port = base_port + i * 2;
2714 module = mlxsw_sp->port_to_module[local_port];
2715
2716 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
2717 0);
2718 }
2719
2720 for (i = 0; i < count; i++)
2721 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
2722
2723 for (i = 0; i < count; i++) {
2724 local_port = base_port + i * 2;
2725 module = mlxsw_sp->port_to_module[local_port];
2726
2727 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
Ido Schimmeld664b412016-06-09 09:51:40 +02002728 width, 0);
Ido Schimmelbe945352016-06-09 09:51:39 +02002729 }
2730}
2731
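/* devlink port split handler: a full-width port can be split into two or
 * four ports. The affected ports in the cluster are removed first and the
 * split ports are then created in their place; on failure the original,
 * unsplit ports are restored.
 */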
Jiri Pirkob2f10572016-04-08 19:11:23 +02002732static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2733 unsigned int count)
Ido Schimmel18f1e702016-02-26 17:32:31 +01002734{
Jiri Pirkob2f10572016-04-08 19:11:23 +02002735 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002736 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002737 u8 module, cur_width, base_port;
2738 int i;
2739 int err;
2740
2741 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2742 if (!mlxsw_sp_port) {
2743 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2744 local_port);
2745 return -EINVAL;
2746 }
2747
Ido Schimmeld664b412016-06-09 09:51:40 +02002748 module = mlxsw_sp_port->mapping.module;
2749 cur_width = mlxsw_sp_port->mapping.width;
2750
Ido Schimmel18f1e702016-02-26 17:32:31 +01002751 if (count != 2 && count != 4) {
2752 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2753 return -EINVAL;
2754 }
2755
Ido Schimmel18f1e702016-02-26 17:32:31 +01002756 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2757 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2758 return -EINVAL;
2759 }
2760
2761 /* Make sure we have enough slave (even) ports for the split. */
2762 if (count == 2) {
2763 base_port = local_port;
2764 if (mlxsw_sp->ports[base_port + 1]) {
2765 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2766 return -EINVAL;
2767 }
2768 } else {
2769 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2770 if (mlxsw_sp->ports[base_port + 1] ||
2771 mlxsw_sp->ports[base_port + 3]) {
2772 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2773 return -EINVAL;
2774 }
2775 }
2776
2777 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002778 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2779 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002780
Ido Schimmelbe945352016-06-09 09:51:39 +02002781 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2782 if (err) {
2783 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2784 goto err_port_split_create;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002785 }
2786
2787 return 0;
2788
Ido Schimmelbe945352016-06-09 09:51:39 +02002789err_port_split_create:
2790 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002791 return err;
2792}
2793
Jiri Pirkob2f10572016-04-08 19:11:23 +02002794static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
Ido Schimmel18f1e702016-02-26 17:32:31 +01002795{
Jiri Pirkob2f10572016-04-08 19:11:23 +02002796 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002797 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmeld664b412016-06-09 09:51:40 +02002798 u8 cur_width, base_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002799 unsigned int count;
2800 int i;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002801
2802 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2803 if (!mlxsw_sp_port) {
2804 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2805 local_port);
2806 return -EINVAL;
2807 }
2808
2809 if (!mlxsw_sp_port->split) {
2810 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2811 return -EINVAL;
2812 }
2813
Ido Schimmeld664b412016-06-09 09:51:40 +02002814 cur_width = mlxsw_sp_port->mapping.width;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002815 count = cur_width == 1 ? 4 : 2;
2816
2817 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2818
2819 /* Determine which ports to remove. */
2820 if (count == 2 && local_port >= base_port + 2)
2821 base_port = base_port + 2;
2822
2823 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002824 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2825 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002826
Ido Schimmelbe945352016-06-09 09:51:39 +02002827 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002828
2829 return 0;
2830}
2831
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002832static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2833 char *pude_pl, void *priv)
2834{
2835 struct mlxsw_sp *mlxsw_sp = priv;
2836 struct mlxsw_sp_port *mlxsw_sp_port;
2837 enum mlxsw_reg_pude_oper_status status;
2838 u8 local_port;
2839
2840 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2841 mlxsw_sp_port = mlxsw_sp->ports[local_port];
Ido Schimmelbbf2a472016-07-02 11:00:14 +02002842 if (!mlxsw_sp_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002843 return;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002844
2845 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2846 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2847 netdev_info(mlxsw_sp_port->dev, "link up\n");
2848 netif_carrier_on(mlxsw_sp_port->dev);
2849 } else {
2850 netdev_info(mlxsw_sp_port->dev, "link down\n");
2851 netif_carrier_off(mlxsw_sp_port->dev);
2852 }
2853}
2854
Nogah Frankel14eeda92016-11-25 10:33:32 +01002855static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2856 u8 local_port, void *priv)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002857{
2858 struct mlxsw_sp *mlxsw_sp = priv;
2859 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2860 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2861
2862 if (unlikely(!mlxsw_sp_port)) {
2863 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2864 local_port);
2865 return;
2866 }
2867
2868 skb->dev = mlxsw_sp_port->dev;
2869
2870 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2871 u64_stats_update_begin(&pcpu_stats->syncp);
2872 pcpu_stats->rx_packets++;
2873 pcpu_stats->rx_bytes += skb->len;
2874 u64_stats_update_end(&pcpu_stats->syncp);
2875
2876 skb->protocol = eth_type_trans(skb, skb->dev);
2877 netif_receive_skb(skb);
2878}
2879
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02002880static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2881 void *priv)
2882{
2883 skb->offload_fwd_mark = 1;
Nogah Frankel14eeda92016-11-25 10:33:32 +01002884 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02002885}
2886
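/* Trap handler for sampled packets (PKT_SAMPLE): hand the packet, possibly
 * truncated to the configured size, to the port's psample group so it can
 * reach userspace samplers, then consume the skb.
 */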
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002887static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
2888 void *priv)
2889{
2890 struct mlxsw_sp *mlxsw_sp = priv;
2891 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2892 struct psample_group *psample_group;
2893 u32 size;
2894
2895 if (unlikely(!mlxsw_sp_port)) {
2896 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
2897 local_port);
2898 goto out;
2899 }
2900 if (unlikely(!mlxsw_sp_port->sample)) {
2901 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
2902 local_port);
2903 goto out;
2904 }
2905
2906 size = mlxsw_sp_port->sample->truncate ?
2907 mlxsw_sp_port->sample->trunc_size : skb->len;
2908
2909 rcu_read_lock();
2910 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
2911 if (!psample_group)
2912 goto out_unlock;
2913 psample_sample_packet(psample_group, skb, size,
2914 mlxsw_sp_port->dev->ifindex, 0,
2915 mlxsw_sp_port->sample->rate);
2916out_unlock:
2917 rcu_read_unlock();
2918out:
2919 consume_skb(skb);
2920}
2921
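/* Helper macros for the trap/listener table below. Both wrap MLXSW_RXL()
 * with the matching RX handler above; for example
 *
 *	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true)
 *
 * expands to
 *
 *	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, STP, TRAP_TO_CPU,
 *		  true, SP_STP, DISCARD)
 *
 * i.e. STP packets are trapped to the CPU as control traffic in the
 * SP_STP trap group.
 */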
Nogah Frankel117b0da2016-11-25 10:33:44 +01002922#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel0fb78a42016-11-25 10:33:39 +01002923 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01002924 _is_ctrl, SP_##_trap_group, DISCARD)
Ido Schimmel93393b32016-08-25 18:42:38 +02002925
Nogah Frankel117b0da2016-11-25 10:33:44 +01002926#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel14eeda92016-11-25 10:33:32 +01002927 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01002928 _is_ctrl, SP_##_trap_group, DISCARD)
2929
2930#define MLXSW_SP_EVENTL(_func, _trap_id) \
2931 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
Nogah Frankel14eeda92016-11-25 10:33:32 +01002932
Nogah Frankel45449132016-11-25 10:33:35 +01002933static const struct mlxsw_listener mlxsw_sp_listener[] = {
2934 /* Events */
Nogah Frankel117b0da2016-11-25 10:33:44 +01002935 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
Nogah Frankelee4a60d2016-11-25 10:33:29 +01002936 /* L2 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01002937 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
2938 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
2939 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
2940 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
2941 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
2942 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
2943 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
2944 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
2945 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
2946 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
2947 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
Ido Schimmel93393b32016-08-25 18:42:38 +02002948 /* L3 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01002949 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2950 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2951 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2952 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
2953 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
2954 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
2955 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
2956 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002957 /* PKT Sample trap */
2958 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
2959 false, SP_IP2ME, DISCARD)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002960};
2961
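/* Rate-limit traffic trapped to the CPU: walk all policers supported by
 * the device (MAX_CPU_POLICERS) and program a rate/burst pair per trap
 * group through the QPCR register. Only the IP2ME group is policed in
 * bytes; the remaining groups are policed in packets.
 */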
Nogah Frankel9148e7c2016-11-25 10:33:47 +01002962static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2963{
2964 char qpcr_pl[MLXSW_REG_QPCR_LEN];
2965 enum mlxsw_reg_qpcr_ir_units ir_units;
2966 int max_cpu_policers;
2967 bool is_bytes;
2968 u8 burst_size;
2969 u32 rate;
2970 int i, err;
2971
2972 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2973 return -EIO;
2974
2975 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2976
2977 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2978 for (i = 0; i < max_cpu_policers; i++) {
2979 is_bytes = false;
2980 switch (i) {
2981 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
2982 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
2983 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
2984 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
2985 rate = 128;
2986 burst_size = 7;
2987 break;
2988 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
2989 rate = 16 * 1024;
2990 burst_size = 10;
2991 break;
2992 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
2993 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
2994 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
2995 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
2996 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2997 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
2998 rate = 1024;
2999 burst_size = 7;
3000 break;
3001 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3002 is_bytes = true;
3003 rate = 4 * 1024;
3004 burst_size = 4;
3005 break;
3006 default:
3007 continue;
3008 }
3009
3010 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3011 burst_size);
3012 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3013 if (err)
3014 return err;
3015 }
3016
3017 return 0;
3018}
3019
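/* Bind each trap group to its CPU policer and assign it a priority and
 * traffic class via the HTGT register. Control protocols (STP, LACP,
 * LLDP, OSPF) get the highest priority, router exceptions the lowest,
 * and the event group is left without a policer.
 */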
Nogah Frankel579c82e2016-11-25 10:33:42 +01003020static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003021{
3022 char htgt_pl[MLXSW_REG_HTGT_LEN];
Nogah Frankel117b0da2016-11-25 10:33:44 +01003023 enum mlxsw_reg_htgt_trap_group i;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003024 int max_cpu_policers;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003025 int max_trap_groups;
3026 u8 priority, tc;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003027 u16 policer_id;
Nogah Frankel117b0da2016-11-25 10:33:44 +01003028 int err;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003029
3030 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3031 return -EIO;
3032
3033 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003034 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003035
3036 for (i = 0; i < max_trap_groups; i++) {
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003037 policer_id = i;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003038 switch (i) {
Nogah Frankel117b0da2016-11-25 10:33:44 +01003039 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3040 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3041 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3042 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3043 priority = 5;
3044 tc = 5;
3045 break;
3046 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3047 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3048 priority = 4;
3049 tc = 4;
3050 break;
3051 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3052 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3053 priority = 3;
3054 tc = 3;
3055 break;
3056 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3057 priority = 2;
3058 tc = 2;
3059 break;
3060 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3061 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3062 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3063 priority = 1;
3064 tc = 1;
3065 break;
3066 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
Nogah Frankel579c82e2016-11-25 10:33:42 +01003067 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3068 tc = MLXSW_REG_HTGT_DEFAULT_TC;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003069 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003070 break;
3071 default:
3072 continue;
3073 }
Nogah Frankel117b0da2016-11-25 10:33:44 +01003074
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003075 if (max_cpu_policers <= policer_id &&
3076 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3077 return -EIO;
3078
3079 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003080 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3081 if (err)
3082 return err;
3083 }
3084
3085 return 0;
3086}
3087
3088static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3089{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003090 int i;
3091 int err;
3092
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003093 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3094 if (err)
3095 return err;
3096
Nogah Frankel579c82e2016-11-25 10:33:42 +01003097 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003098 if (err)
3099 return err;
3100
Nogah Frankel45449132016-11-25 10:33:35 +01003101 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003102 err = mlxsw_core_trap_register(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003103 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003104 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003105 if (err)
Nogah Frankel45449132016-11-25 10:33:35 +01003106 goto err_listener_register;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003107
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003108 }
3109 return 0;
3110
Nogah Frankel45449132016-11-25 10:33:35 +01003111err_listener_register:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003112 for (i--; i >= 0; i--) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003113 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003114 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003115 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003116 }
3117 return err;
3118}
3119
3120static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3121{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003122 int i;
3123
Nogah Frankel45449132016-11-25 10:33:35 +01003124 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003125 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003126 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003127 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003128 }
3129}
3130
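/* Select the flood table used for a given traffic type and bridge type:
 * unknown unicast and unregistered IPv4 multicast get their own tables,
 * all remaining types fall back to the broadcast table. VLAN-unaware
 * (vFID) bridges use a FID-indexed table, VLAN-aware bridges use a
 * FID-offset table.
 */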
3131static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
3132 enum mlxsw_reg_sfgc_type type,
3133 enum mlxsw_reg_sfgc_bridge_type bridge_type)
3134{
3135 enum mlxsw_flood_table_type table_type;
3136 enum mlxsw_sp_flood_table flood_table;
3137 char sfgc_pl[MLXSW_REG_SFGC_LEN];
3138
Ido Schimmel19ae6122015-12-15 16:03:39 +01003139 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003140 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
Ido Schimmel19ae6122015-12-15 16:03:39 +01003141 else
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003142 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
Ido Schimmel19ae6122015-12-15 16:03:39 +01003143
Nogah Frankel71c365b2017-02-09 14:54:46 +01003144 switch (type) {
3145 case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
Ido Schimmel19ae6122015-12-15 16:03:39 +01003146 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
Nogah Frankel71c365b2017-02-09 14:54:46 +01003147 break;
3148 case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
Nogah Frankel71c365b2017-02-09 14:54:46 +01003149 flood_table = MLXSW_SP_FLOOD_TABLE_MC;
3150 break;
3151 default:
3152 flood_table = MLXSW_SP_FLOOD_TABLE_BC;
3153 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003154
3155 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
3156 flood_table);
3157 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
3158}
3159
3160static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
3161{
3162 int type, err;
3163
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003164 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
3165 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
3166 continue;
3167
3168 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3169 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
3170 if (err)
3171 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003172
3173 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3174 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
3175 if (err)
3176 return err;
3177 }
3178
3179 return 0;
3180}
3181
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003182static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3183{
3184 char slcr_pl[MLXSW_REG_SLCR_LEN];
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003185 int err;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003186
3187 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3188 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3189 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3190 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3191 MLXSW_REG_SLCR_LAG_HASH_SIP |
3192 MLXSW_REG_SLCR_LAG_HASH_DIP |
3193 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3194 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3195 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003196 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3197 if (err)
3198 return err;
3199
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003200 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3201 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003202 return -EIO;
3203
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003204 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003205 sizeof(struct mlxsw_sp_upper),
3206 GFP_KERNEL);
3207 if (!mlxsw_sp->lags)
3208 return -ENOMEM;
3209
3210 return 0;
3211}
3212
3213static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3214{
3215 kfree(mlxsw_sp->lags);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003216}
3217
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003218static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3219{
3220 char htgt_pl[MLXSW_REG_HTGT_LEN];
3221
Nogah Frankel579c82e2016-11-25 10:33:42 +01003222 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3223 MLXSW_REG_HTGT_INVALID_POLICER,
3224 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3225 MLXSW_REG_HTGT_DEFAULT_TC);
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003226 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3227}
3228
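/* Main init callback invoked by the mlxsw core: bring up the individual
 * subsystems (traps, flood tables, shared buffers, LAG, switchdev,
 * router, SPAN, ACL, counter pool) in dependency order and only then
 * create the front panel ports, unwinding in reverse order on error.
 */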
Jiri Pirkob2f10572016-04-08 19:11:23 +02003229static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003230 const struct mlxsw_bus_info *mlxsw_bus_info)
3231{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003232 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003233 int err;
3234
3235 mlxsw_sp->core = mlxsw_core;
3236 mlxsw_sp->bus_info = mlxsw_bus_info;
Ido Schimmel14d39462016-06-20 23:04:15 +02003237 INIT_LIST_HEAD(&mlxsw_sp->fids);
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02003238 INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
Elad Raz3a49b4f2016-01-10 21:06:28 +01003239 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003240
3241 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3242 if (err) {
3243 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3244 return err;
3245 }
3246
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003247 err = mlxsw_sp_traps_init(mlxsw_sp);
3248 if (err) {
Nogah Frankel45449132016-11-25 10:33:35 +01003249 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3250 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003251 }
3252
3253 err = mlxsw_sp_flood_init(mlxsw_sp);
3254 if (err) {
3255 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
3256 goto err_flood_init;
3257 }
3258
3259 err = mlxsw_sp_buffers_init(mlxsw_sp);
3260 if (err) {
3261 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3262 goto err_buffers_init;
3263 }
3264
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003265 err = mlxsw_sp_lag_init(mlxsw_sp);
3266 if (err) {
3267 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3268 goto err_lag_init;
3269 }
3270
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003271 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3272 if (err) {
3273 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3274 goto err_switchdev_init;
3275 }
3276
Ido Schimmel464dce12016-07-02 11:00:15 +02003277 err = mlxsw_sp_router_init(mlxsw_sp);
3278 if (err) {
3279 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3280 goto err_router_init;
3281 }
3282
Yotam Gigi763b4b72016-07-21 12:03:17 +02003283 err = mlxsw_sp_span_init(mlxsw_sp);
3284 if (err) {
3285 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3286 goto err_span_init;
3287 }
3288
Jiri Pirko22a67762017-02-03 10:29:07 +01003289 err = mlxsw_sp_acl_init(mlxsw_sp);
3290 if (err) {
3291 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3292 goto err_acl_init;
3293 }
3294
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003295 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3296 if (err) {
3297 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3298 goto err_counter_pool_init;
3299 }
3300
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003301 err = mlxsw_sp_ports_create(mlxsw_sp);
3302 if (err) {
3303 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3304 goto err_ports_create;
3305 }
3306
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003307 return 0;
3308
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003309err_ports_create:
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003310 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3311err_counter_pool_init:
Jiri Pirko22a67762017-02-03 10:29:07 +01003312 mlxsw_sp_acl_fini(mlxsw_sp);
3313err_acl_init:
Yotam Gigi763b4b72016-07-21 12:03:17 +02003314 mlxsw_sp_span_fini(mlxsw_sp);
3315err_span_init:
Ido Schimmel464dce12016-07-02 11:00:15 +02003316 mlxsw_sp_router_fini(mlxsw_sp);
3317err_router_init:
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003318 mlxsw_sp_switchdev_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003319err_switchdev_init:
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003320 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003321err_lag_init:
Jiri Pirko0f433fa2016-04-14 18:19:24 +02003322 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003323err_buffers_init:
3324err_flood_init:
3325 mlxsw_sp_traps_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003326 return err;
3327}
3328
Jiri Pirkob2f10572016-04-08 19:11:23 +02003329static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003330{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003331 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003332
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003333 mlxsw_sp_ports_remove(mlxsw_sp);
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003334 mlxsw_sp_counter_pool_fini(mlxsw_sp);
Jiri Pirko22a67762017-02-03 10:29:07 +01003335 mlxsw_sp_acl_fini(mlxsw_sp);
Yotam Gigi763b4b72016-07-21 12:03:17 +02003336 mlxsw_sp_span_fini(mlxsw_sp);
Ido Schimmel464dce12016-07-02 11:00:15 +02003337 mlxsw_sp_router_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003338 mlxsw_sp_switchdev_fini(mlxsw_sp);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003339 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko5113bfd2016-05-06 22:20:59 +02003340 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003341 mlxsw_sp_traps_fini(mlxsw_sp);
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02003342 WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
Ido Schimmel14d39462016-06-20 23:04:15 +02003343 WARN_ON(!list_empty(&mlxsw_sp->fids));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003344}
3345
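/* Configuration profile handed to the device during init: flood table
 * layout (three FID-offset tables sized for all VLANs and three vFID
 * tables), maximum MID entries, and the split of the KVD memory into
 * linear, hash-single and hash-double parts.
 */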
3346static struct mlxsw_config_profile mlxsw_sp_config_profile = {
3347 .used_max_vepa_channels = 1,
3348 .max_vepa_channels = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003349 .used_max_mid = 1,
Elad Raz53ae6282016-01-10 21:06:26 +01003350 .max_mid = MLXSW_SP_MID_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003351 .used_max_pgt = 1,
3352 .max_pgt = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003353 .used_flood_tables = 1,
3354 .used_flood_mode = 1,
3355 .flood_mode = 3,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003356 .max_fid_offset_flood_tables = 3,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003357 .fid_offset_flood_table_size = VLAN_N_VID - 1,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003358 .max_fid_flood_tables = 3,
Ido Schimmel19ae6122015-12-15 16:03:39 +01003359 .fid_flood_table_size = MLXSW_SP_VFID_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003360 .used_max_ib_mc = 1,
3361 .max_ib_mc = 0,
3362 .used_max_pkey = 1,
3363 .max_pkey = 0,
Nogah Frankel403547d2016-09-20 11:16:52 +02003364 .used_kvd_split_data = 1,
3365 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3366 .kvd_hash_single_parts = 2,
3367 .kvd_hash_double_parts = 1,
Jiri Pirkoc6022422016-07-05 11:27:46 +02003368 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003369 .swid_config = {
3370 {
3371 .used_type = 1,
3372 .type = MLXSW_PORT_SWID_TYPE_ETH,
3373 }
3374 },
Nogah Frankel57d316b2016-07-21 12:03:09 +02003375 .resource_query_enable = 1,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003376};
3377
3378static struct mlxsw_driver mlxsw_sp_driver = {
Jiri Pirko1d20d232016-10-27 15:12:59 +02003379 .kind = mlxsw_sp_driver_name,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003380 .priv_size = sizeof(struct mlxsw_sp),
3381 .init = mlxsw_sp_init,
3382 .fini = mlxsw_sp_fini,
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003383 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003384 .port_split = mlxsw_sp_port_split,
3385 .port_unsplit = mlxsw_sp_port_unsplit,
3386 .sb_pool_get = mlxsw_sp_sb_pool_get,
3387 .sb_pool_set = mlxsw_sp_sb_pool_set,
3388 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3389 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3390 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3391 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3392 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3393 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3394 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3395 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3396 .txhdr_construct = mlxsw_sp_txhdr_construct,
3397 .txhdr_len = MLXSW_TXHDR_LEN,
3398 .profile = &mlxsw_sp_config_profile,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003399};
3400
Jiri Pirko22a67762017-02-03 10:29:07 +01003401bool mlxsw_sp_port_dev_check(const struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003402{
3403 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3404}
3405
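/* Helpers for locating the mlxsw_sp port (and hence the driver
 * instance) underneath an arbitrary upper device by walking its lower
 * devices. The _rcu variant uses the RCU walk, and
 * mlxsw_sp_port_lower_dev_hold() additionally takes a reference on the
 * port netdev.
 */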
Jiri Pirko1182e532017-03-06 21:25:20 +01003406static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
David Aherndd823642016-10-17 19:15:49 -07003407{
Jiri Pirko1182e532017-03-06 21:25:20 +01003408 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
David Aherndd823642016-10-17 19:15:49 -07003409 int ret = 0;
3410
3411 if (mlxsw_sp_port_dev_check(lower_dev)) {
Jiri Pirko1182e532017-03-06 21:25:20 +01003412 *p_mlxsw_sp_port = netdev_priv(lower_dev);
David Aherndd823642016-10-17 19:15:49 -07003413 ret = 1;
3414 }
3415
3416 return ret;
3417}
3418
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003419static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3420{
Jiri Pirko1182e532017-03-06 21:25:20 +01003421 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003422
3423 if (mlxsw_sp_port_dev_check(dev))
3424 return netdev_priv(dev);
3425
Jiri Pirko1182e532017-03-06 21:25:20 +01003426 mlxsw_sp_port = NULL;
3427 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003428
Jiri Pirko1182e532017-03-06 21:25:20 +01003429 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003430}
3431
Ido Schimmel4724ba562017-03-10 08:53:39 +01003432struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003433{
3434 struct mlxsw_sp_port *mlxsw_sp_port;
3435
3436 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3437 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3438}
3439
3440static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3441{
Jiri Pirko1182e532017-03-06 21:25:20 +01003442 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003443
3444 if (mlxsw_sp_port_dev_check(dev))
3445 return netdev_priv(dev);
3446
Jiri Pirko1182e532017-03-06 21:25:20 +01003447 mlxsw_sp_port = NULL;
3448 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3449 &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003450
Jiri Pirko1182e532017-03-06 21:25:20 +01003451 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003452}
3453
3454struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3455{
3456 struct mlxsw_sp_port *mlxsw_sp_port;
3457
3458 rcu_read_lock();
3459 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3460 if (mlxsw_sp_port)
3461 dev_hold(mlxsw_sp_port->dev);
3462 rcu_read_unlock();
3463 return mlxsw_sp_port;
3464}
3465
3466void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3467{
3468 dev_put(mlxsw_sp_port->dev);
3469}
3470
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003471static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3472 u16 fid)
3473{
3474 if (mlxsw_sp_fid_is_vfid(fid))
3475 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3476 else
3477 return test_bit(fid, lag_port->active_vlans);
3478}
3479
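/* Decide whether {Port, FID} FDB entries should be flushed: a non-LAG
 * port always flushes, while a LAG port flushes only if no other member
 * of the LAG is still a member of the FID.
 */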
3480static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3481 u16 fid)
Ido Schimmel039c49a2016-01-27 15:20:18 +01003482{
3483 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003484 u8 local_port = mlxsw_sp_port->local_port;
3485 u16 lag_id = mlxsw_sp_port->lag_id;
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003486 u64 max_lag_members;
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003487 int i, count = 0;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003488
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003489 if (!mlxsw_sp_port->lagged)
3490 return true;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003491
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003492 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3493 MAX_LAG_MEMBERS);
3494 for (i = 0; i < max_lag_members; i++) {
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003495 struct mlxsw_sp_port *lag_port;
3496
3497 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3498 if (!lag_port || lag_port->local_port == local_port)
3499 continue;
3500 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
3501 count++;
3502 }
3503
3504 return !count;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003505}
3506
3507static int
3508mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3509 u16 fid)
3510{
3511 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3512 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3513
3514 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
3515 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3516 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
3517 mlxsw_sp_port->local_port);
3518
Ido Schimmel22305372016-06-20 23:04:21 +02003519 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
3520 mlxsw_sp_port->local_port, fid);
3521
Ido Schimmel039c49a2016-01-27 15:20:18 +01003522 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3523}
3524
3525static int
Ido Schimmel039c49a2016-01-27 15:20:18 +01003526mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3527 u16 fid)
3528{
3529 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3530 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3531
3532 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
3533 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3534 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
3535
Ido Schimmel22305372016-06-20 23:04:21 +02003536 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
3537 mlxsw_sp_port->lag_id, fid);
3538
Ido Schimmel039c49a2016-01-27 15:20:18 +01003539 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3540}
3541
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003542int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
Ido Schimmel039c49a2016-01-27 15:20:18 +01003543{
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003544 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3545 return 0;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003546
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003547 if (mlxsw_sp_port->lagged)
3548 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
Ido Schimmel039c49a2016-01-27 15:20:18 +01003549 fid);
3550 else
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003551 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
Ido Schimmel039c49a2016-01-27 15:20:18 +01003552}
3553
Ido Schimmel701b1862016-07-04 08:23:16 +02003554static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
3555{
3556 struct mlxsw_sp_fid *f, *tmp;
3557
3558 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
3559 if (--f->ref_count == 0)
3560 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3561 else
3562 WARN_ON_ONCE(1);
3563}
3564
Ido Schimmel7117a572016-06-20 23:04:06 +02003565static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3566 struct net_device *br_dev)
3567{
3568 return !mlxsw_sp->master_bridge.dev ||
3569 mlxsw_sp->master_bridge.dev == br_dev;
3570}
3571
3572static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
3573 struct net_device *br_dev)
3574{
3575 mlxsw_sp->master_bridge.dev = br_dev;
3576 mlxsw_sp->master_bridge.ref_count++;
3577}
3578
3579static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
3580{
Ido Schimmel701b1862016-07-04 08:23:16 +02003581 if (--mlxsw_sp->master_bridge.ref_count == 0) {
Ido Schimmel7117a572016-06-20 23:04:06 +02003582 mlxsw_sp->master_bridge.dev = NULL;
Ido Schimmel701b1862016-07-04 08:23:16 +02003583 /* It's possible upper VLAN devices are still holding
3584 * references to underlying FIDs. Drop the reference
3585 * and release the resources if it was the last one.
3586 * If it wasn't, then something bad happened.
3587 */
3588 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
3589 }
Ido Schimmel7117a572016-06-20 23:04:06 +02003590}
3591
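/* Enslave a port to the (single) master bridge: remove the implicit
 * VLAN 1 interface, take a reference on the master bridge and enable
 * learning and flooding on the port.
 */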
3592static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
3593 struct net_device *br_dev)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003594{
3595 struct net_device *dev = mlxsw_sp_port->dev;
3596 int err;
3597
3598 /* When port is not bridged untagged packets are tagged with
3599 * PVID=VID=1, thereby creating an implicit VLAN interface in
3600 * the device. Remove it and let bridge code take care of its
3601 * own VLANs.
3602 */
3603 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01003604 if (err)
3605 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003606
Ido Schimmel7117a572016-06-20 23:04:06 +02003607 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
3608
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01003609 mlxsw_sp_port->learning = 1;
3610 mlxsw_sp_port->learning_sync = 1;
3611 mlxsw_sp_port->uc_flood = 1;
Nogah Frankel71c365b2017-02-09 14:54:46 +01003612 mlxsw_sp_port->mc_flood = 1;
Nogah Frankel8ecd4592017-02-09 14:54:47 +01003613 mlxsw_sp_port->mc_router = 0;
3614 mlxsw_sp_port->mc_disabled = 1;
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01003615 mlxsw_sp_port->bridged = 1;
3616
3617 return 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003618}
3619
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003620static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003621{
3622 struct net_device *dev = mlxsw_sp_port->dev;
Ido Schimmel5a8f4522016-01-04 10:42:25 +01003623
Ido Schimmel28a01d22016-02-18 11:30:02 +01003624 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
3625
Ido Schimmel7117a572016-06-20 23:04:06 +02003626 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
3627
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01003628 mlxsw_sp_port->learning = 0;
3629 mlxsw_sp_port->learning_sync = 0;
3630 mlxsw_sp_port->uc_flood = 0;
Nogah Frankel71c365b2017-02-09 14:54:46 +01003631 mlxsw_sp_port->mc_flood = 0;
Nogah Frankel8ecd4592017-02-09 14:54:47 +01003632 mlxsw_sp_port->mc_router = 0;
Ido Schimmel5a8f4522016-01-04 10:42:25 +01003633 mlxsw_sp_port->bridged = 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003634
3635 /* Add implicit VLAN interface in the device, so that untagged
3636 * packets will be classified to the default vFID.
3637 */
Ido Schimmel82e6db02016-06-20 23:04:04 +02003638 mlxsw_sp_port_add_vid(dev, 0, 1);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003639}
3640
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003641static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003642{
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003643 char sldr_pl[MLXSW_REG_SLDR_LEN];
3644
3645 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3646 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3647}
3648
3649static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3650{
3651 char sldr_pl[MLXSW_REG_SLDR_LEN];
3652
3653 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3654 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3655}
3656
3657static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3658 u16 lag_id, u8 port_index)
3659{
3660 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3661 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3662
3663 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3664 lag_id, port_index);
3665 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3666}
3667
3668static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3669 u16 lag_id)
3670{
3671 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3672 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3673
3674 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3675 lag_id);
3676 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3677}
3678
3679static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3680 u16 lag_id)
3681{
3682 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3683 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3684
3685 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3686 lag_id);
3687 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3688}
3689
3690static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3691 u16 lag_id)
3692{
3693 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3694 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3695
3696 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3697 lag_id);
3698 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3699}
3700
3701static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3702 struct net_device *lag_dev,
3703 u16 *p_lag_id)
3704{
3705 struct mlxsw_sp_upper *lag;
3706 int free_lag_id = -1;
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003707 u64 max_lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003708 int i;
3709
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003710 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3711 for (i = 0; i < max_lag; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003712 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3713 if (lag->ref_count) {
3714 if (lag->dev == lag_dev) {
3715 *p_lag_id = i;
3716 return 0;
3717 }
3718 } else if (free_lag_id < 0) {
3719 free_lag_id = i;
3720 }
3721 }
3722 if (free_lag_id < 0)
3723 return -EBUSY;
3724 *p_lag_id = free_lag_id;
3725 return 0;
3726}
3727
3728static bool
3729mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3730 struct net_device *lag_dev,
3731 struct netdev_lag_upper_info *lag_upper_info)
3732{
3733 u16 lag_id;
3734
3735 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3736 return false;
3737 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3738 return false;
3739 return true;
3740}
3741
3742static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3743 u16 lag_id, u8 *p_port_index)
3744{
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003745 u64 max_lag_members;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003746 int i;
3747
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003748 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3749 MAX_LAG_MEMBERS);
3750 for (i = 0; i < max_lag_members; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003751 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3752 *p_port_index = i;
3753 return 0;
3754 }
3755 }
3756 return -EBUSY;
3757}
3758
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003759static void
3760mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
Ido Schimmel186962e2017-03-10 08:53:36 +01003761 struct net_device *lag_dev, u16 lag_id)
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003762{
3763 struct mlxsw_sp_port *mlxsw_sp_vport;
Ido Schimmel11943ff2016-07-02 11:00:12 +02003764 struct mlxsw_sp_fid *f;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003765
3766 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3767 if (WARN_ON(!mlxsw_sp_vport))
3768 return;
3769
Ido Schimmel11943ff2016-07-02 11:00:12 +02003770 /* If vPort is assigned a RIF, then leave it since it's no
3771 * longer valid.
3772 */
3773 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3774 if (f)
3775 f->leave(mlxsw_sp_vport);
3776
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003777 mlxsw_sp_vport->lag_id = lag_id;
3778 mlxsw_sp_vport->lagged = 1;
Ido Schimmel186962e2017-03-10 08:53:36 +01003779 mlxsw_sp_vport->dev = lag_dev;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003780}
3781
3782static void
3783mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3784{
3785 struct mlxsw_sp_port *mlxsw_sp_vport;
Ido Schimmel11943ff2016-07-02 11:00:12 +02003786 struct mlxsw_sp_fid *f;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003787
3788 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3789 if (WARN_ON(!mlxsw_sp_vport))
3790 return;
3791
Ido Schimmel11943ff2016-07-02 11:00:12 +02003792 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3793 if (f)
3794 f->leave(mlxsw_sp_vport);
3795
Ido Schimmel186962e2017-03-10 08:53:36 +01003796 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003797 mlxsw_sp_vport->lagged = 0;
3798}
3799
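/* Join a port to a LAG: allocate (or reuse) a LAG ID for the upper
 * device, create the LAG in hardware on first use, add the port as a
 * collector port and enable it, and finally move the PVID vPort over
 * to the LAG device.
 */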
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003800static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3801 struct net_device *lag_dev)
3802{
3803 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3804 struct mlxsw_sp_upper *lag;
3805 u16 lag_id;
3806 u8 port_index;
3807 int err;
3808
3809 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3810 if (err)
3811 return err;
3812 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3813 if (!lag->ref_count) {
3814 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3815 if (err)
3816 return err;
3817 lag->dev = lag_dev;
3818 }
3819
3820 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3821 if (err)
3822 return err;
3823 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3824 if (err)
3825 goto err_col_port_add;
3826 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3827 if (err)
3828 goto err_col_port_enable;
3829
3830 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3831 mlxsw_sp_port->local_port);
3832 mlxsw_sp_port->lag_id = lag_id;
3833 mlxsw_sp_port->lagged = 1;
3834 lag->ref_count++;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003835
Ido Schimmel186962e2017-03-10 08:53:36 +01003836 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003837
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003838 return 0;
3839
Ido Schimmel51554db2016-05-06 22:18:39 +02003840err_col_port_enable:
3841 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003842err_col_port_add:
3843 if (!lag->ref_count)
3844 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003845 return err;
3846}
3847
Ido Schimmel82e6db02016-06-20 23:04:04 +02003848static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3849 struct net_device *lag_dev)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003850{
3851 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003852 u16 lag_id = mlxsw_sp_port->lag_id;
Ido Schimmel1c800752016-06-20 23:04:20 +02003853 struct mlxsw_sp_upper *lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003854
3855 if (!mlxsw_sp_port->lagged)
Ido Schimmel82e6db02016-06-20 23:04:04 +02003856 return;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003857 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3858 WARN_ON(lag->ref_count == 0);
3859
Ido Schimmel82e6db02016-06-20 23:04:04 +02003860 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
3861 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003862
Ido Schimmel4dc236c2016-01-27 15:20:16 +01003863 if (mlxsw_sp_port->bridged) {
3864 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003865 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
Ido Schimmel4dc236c2016-01-27 15:20:16 +01003866 }
3867
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003868 if (lag->ref_count == 1)
Ido Schimmel82e6db02016-06-20 23:04:04 +02003869 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003870
3871 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3872 mlxsw_sp_port->local_port);
3873 mlxsw_sp_port->lagged = 0;
3874 lag->ref_count--;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003875
3876 mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003877}
3878
Jiri Pirko74581202015-12-03 12:12:30 +01003879static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3880 u16 lag_id)
3881{
3882 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3883 char sldr_pl[MLXSW_REG_SLDR_LEN];
3884
3885 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3886 mlxsw_sp_port->local_port);
3887 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3888}
3889
3890static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3891 u16 lag_id)
3892{
3893 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3894 char sldr_pl[MLXSW_REG_SLDR_LEN];
3895
3896 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3897 mlxsw_sp_port->local_port);
3898 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3899}
3900
3901static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3902 bool lag_tx_enabled)
3903{
3904 if (lag_tx_enabled)
3905 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3906 mlxsw_sp_port->lag_id);
3907 else
3908 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3909 mlxsw_sp_port->lag_id);
3910}
3911
3912static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3913 struct netdev_lag_lower_state_info *info)
3914{
3915 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3916}
3917
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003918static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3919 struct net_device *vlan_dev)
3920{
3921 struct mlxsw_sp_port *mlxsw_sp_vport;
3922 u16 vid = vlan_dev_vlan_id(vlan_dev);
3923
3924 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
Ido Schimmel423b9372016-06-20 23:04:03 +02003925 if (WARN_ON(!mlxsw_sp_vport))
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003926 return -EINVAL;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003927
3928 mlxsw_sp_vport->dev = vlan_dev;
3929
3930 return 0;
3931}
3932
Ido Schimmel82e6db02016-06-20 23:04:04 +02003933static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3934 struct net_device *vlan_dev)
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003935{
3936 struct mlxsw_sp_port *mlxsw_sp_vport;
3937 u16 vid = vlan_dev_vlan_id(vlan_dev);
3938
3939 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
Ido Schimmel423b9372016-06-20 23:04:03 +02003940 if (WARN_ON(!mlxsw_sp_vport))
Ido Schimmel82e6db02016-06-20 23:04:04 +02003941 return;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003942
3943 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003944}
3945
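/* netdev notifier handler for uppers of a port: PRECHANGEUPPER is used
 * to veto unsupported configurations (a second bridge, non-hash LAGs,
 * unsupported VLAN setups), while CHANGEUPPER applies the actual
 * join/leave to the bridge, LAG, VLAN or VRF upper.
 */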
Jiri Pirko74581202015-12-03 12:12:30 +01003946static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3947 unsigned long event, void *ptr)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003948{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003949 struct netdev_notifier_changeupper_info *info;
3950 struct mlxsw_sp_port *mlxsw_sp_port;
3951 struct net_device *upper_dev;
3952 struct mlxsw_sp *mlxsw_sp;
Ido Schimmel80bedf12016-06-20 23:03:59 +02003953 int err = 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003954
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003955 mlxsw_sp_port = netdev_priv(dev);
3956 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3957 info = ptr;
3958
3959 switch (event) {
3960 case NETDEV_PRECHANGEUPPER:
3961 upper_dev = info->upper_dev;
Ido Schimmel59fe9b32016-06-20 23:04:00 +02003962 if (!is_vlan_dev(upper_dev) &&
3963 !netif_is_lag_master(upper_dev) &&
Ido Schimmel7179eb52017-03-16 09:08:18 +01003964 !netif_is_bridge_master(upper_dev) &&
3965 !netif_is_l3_master(upper_dev))
Ido Schimmel59fe9b32016-06-20 23:04:00 +02003966 return -EINVAL;
Ido Schimmel6ec43902016-06-20 23:04:01 +02003967 if (!info->linking)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003968 break;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003969 /* HW limitation forbids to put ports to multiple bridges. */
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003970 if (netif_is_bridge_master(upper_dev) &&
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003971 !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
Ido Schimmel80bedf12016-06-20 23:03:59 +02003972 return -EINVAL;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003973 if (netif_is_lag_master(upper_dev) &&
3974 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
3975 info->upper_info))
Ido Schimmel80bedf12016-06-20 23:03:59 +02003976 return -EINVAL;
Ido Schimmel6ec43902016-06-20 23:04:01 +02003977 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
3978 return -EINVAL;
3979 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
3980 !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
3981 return -EINVAL;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003982 break;
3983 case NETDEV_CHANGEUPPER:
3984 upper_dev = info->upper_dev;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003985 if (is_vlan_dev(upper_dev)) {
Ido Schimmel80bedf12016-06-20 23:03:59 +02003986 if (info->linking)
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003987 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
3988 upper_dev);
Ido Schimmel80bedf12016-06-20 23:03:59 +02003989 else
Ido Schimmel82e6db02016-06-20 23:04:04 +02003990 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
3991 upper_dev);
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003992 } else if (netif_is_bridge_master(upper_dev)) {
Ido Schimmel7117a572016-06-20 23:04:06 +02003993 if (info->linking)
3994 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
3995 upper_dev);
3996 else
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003997 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003998 } else if (netif_is_lag_master(upper_dev)) {
Ido Schimmel80bedf12016-06-20 23:03:59 +02003999 if (info->linking)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004000 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4001 upper_dev);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004002 else
Ido Schimmel82e6db02016-06-20 23:04:04 +02004003 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4004 upper_dev);
Ido Schimmel7179eb52017-03-16 09:08:18 +01004005 } else if (netif_is_l3_master(upper_dev)) {
4006 if (info->linking)
4007 err = mlxsw_sp_port_vrf_join(mlxsw_sp_port);
4008 else
4009 mlxsw_sp_port_vrf_leave(mlxsw_sp_port);
Ido Schimmel59fe9b32016-06-20 23:04:00 +02004010 } else {
4011 err = -EINVAL;
4012 WARN_ON(1);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004013 }
4014 break;
4015 }
4016
Ido Schimmel80bedf12016-06-20 23:03:59 +02004017 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004018}
4019
Jiri Pirko74581202015-12-03 12:12:30 +01004020static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4021 unsigned long event, void *ptr)
4022{
4023 struct netdev_notifier_changelowerstate_info *info;
4024 struct mlxsw_sp_port *mlxsw_sp_port;
4025 int err;
4026
4027 mlxsw_sp_port = netdev_priv(dev);
4028 info = ptr;
4029
4030 switch (event) {
4031 case NETDEV_CHANGELOWERSTATE:
4032 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4033 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4034 info->lower_state_info);
4035 if (err)
4036 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4037 }
4038 break;
4039 }
4040
Ido Schimmel80bedf12016-06-20 23:03:59 +02004041 return 0;
Jiri Pirko74581202015-12-03 12:12:30 +01004042}
4043
4044static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
4045 unsigned long event, void *ptr)
4046{
4047 switch (event) {
4048 case NETDEV_PRECHANGEUPPER:
4049 case NETDEV_CHANGEUPPER:
4050 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
4051 case NETDEV_CHANGELOWERSTATE:
4052 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
4053 }
4054
Ido Schimmel80bedf12016-06-20 23:03:59 +02004055 return 0;
Jiri Pirko74581202015-12-03 12:12:30 +01004056}
4057
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004058static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4059 unsigned long event, void *ptr)
4060{
4061 struct net_device *dev;
4062 struct list_head *iter;
4063 int ret;
4064
4065 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4066 if (mlxsw_sp_port_dev_check(dev)) {
4067 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004068 if (ret)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004069 return ret;
4070 }
4071 }
4072
Ido Schimmel80bedf12016-06-20 23:03:59 +02004073 return 0;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004074}
4075
Ido Schimmel701b1862016-07-04 08:23:16 +02004076static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
4077 struct net_device *vlan_dev)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004078{
Ido Schimmel701b1862016-07-04 08:23:16 +02004079 u16 fid = vlan_dev_vlan_id(vlan_dev);
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004080 struct mlxsw_sp_fid *f;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004081
Ido Schimmel701b1862016-07-04 08:23:16 +02004082 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4083 if (!f) {
4084 f = mlxsw_sp_fid_create(mlxsw_sp, fid);
4085 if (IS_ERR(f))
4086 return PTR_ERR(f);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004087 }
4088
Ido Schimmel701b1862016-07-04 08:23:16 +02004089 f->ref_count++;
4090
4091 return 0;
4092}
4093
4094static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
4095 struct net_device *vlan_dev)
4096{
4097 u16 fid = vlan_dev_vlan_id(vlan_dev);
4098 struct mlxsw_sp_fid *f;
4099
4100 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004101 if (f && f->rif)
4102 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel701b1862016-07-04 08:23:16 +02004103 if (f && --f->ref_count == 0)
4104 mlxsw_sp_fid_destroy(mlxsw_sp, f);
4105}
4106
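/* Notifier handler for uppers of the master bridge itself: VLAN uppers
 * of the bridge create/destroy the corresponding FID, and enslaving the
 * bridge to an L3 master (VRF) device is handled by
 * mlxsw_sp_bridge_vrf_join()/leave().
 */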
4107static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4108 unsigned long event, void *ptr)
4109{
4110 struct netdev_notifier_changeupper_info *info;
4111 struct net_device *upper_dev;
4112 struct mlxsw_sp *mlxsw_sp;
Ido Schimmelb4149702017-03-10 08:53:34 +01004113 int err = 0;
Ido Schimmel701b1862016-07-04 08:23:16 +02004114
4115 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4116 if (!mlxsw_sp)
4117 return 0;
Ido Schimmel701b1862016-07-04 08:23:16 +02004118
4119 info = ptr;
4120
4121 switch (event) {
Ido Schimmelb4149702017-03-10 08:53:34 +01004122 case NETDEV_PRECHANGEUPPER:
Ido Schimmel701b1862016-07-04 08:23:16 +02004123 upper_dev = info->upper_dev;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01004124 if (!is_vlan_dev(upper_dev) && !netif_is_l3_master(upper_dev))
Ido Schimmelb4149702017-03-10 08:53:34 +01004125 return -EINVAL;
4126 if (is_vlan_dev(upper_dev) &&
4127 br_dev != mlxsw_sp->master_bridge.dev)
4128 return -EINVAL;
4129 break;
4130 case NETDEV_CHANGEUPPER:
4131 upper_dev = info->upper_dev;
4132 if (is_vlan_dev(upper_dev)) {
4133 if (info->linking)
4134 err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
4135 upper_dev);
4136 else
4137 mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
4138 upper_dev);
Ido Schimmel3d70e4582017-03-16 09:08:19 +01004139 } else if (netif_is_l3_master(upper_dev)) {
4140 if (info->linking)
4141 err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
4142 br_dev);
4143 else
4144 mlxsw_sp_bridge_vrf_leave(mlxsw_sp, br_dev);
Ido Schimmel701b1862016-07-04 08:23:16 +02004145 } else {
Ido Schimmelb4149702017-03-10 08:53:34 +01004146 err = -EINVAL;
4147 WARN_ON(1);
Ido Schimmel701b1862016-07-04 08:23:16 +02004148 }
4149 break;
4150 }
4151
Ido Schimmelb4149702017-03-10 08:53:34 +01004152 return err;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004153}
4154
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004155static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004156{
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004157 return find_first_zero_bit(mlxsw_sp->vfids.mapped,
Ido Schimmel99724c12016-07-04 08:23:14 +02004158 MLXSW_SP_VFID_MAX);
4159}
4160
4161static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
4162{
4163 char sfmr_pl[MLXSW_REG_SFMR_LEN];
4164
4165 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
4166 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004167}
4168
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004169static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
Ido Schimmel1c800752016-06-20 23:04:20 +02004170
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004171static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
4172 struct net_device *br_dev)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004173{
4174 struct device *dev = mlxsw_sp->bus_info->dev;
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004175 struct mlxsw_sp_fid *f;
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004176 u16 vfid, fid;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004177 int err;
4178
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004179 vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004180 if (vfid == MLXSW_SP_VFID_MAX) {
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004181 dev_err(dev, "No available vFIDs\n");
4182 return ERR_PTR(-ERANGE);
4183 }
4184
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004185 fid = mlxsw_sp_vfid_to_fid(vfid);
4186 err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004187 if (err) {
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004188 dev_err(dev, "Failed to create FID=%d\n", fid);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004189 return ERR_PTR(err);
4190 }
4191
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004192 f = kzalloc(sizeof(*f), GFP_KERNEL);
4193 if (!f)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004194 goto err_allocate_vfid;
4195
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004196 f->leave = mlxsw_sp_vport_vfid_leave;
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004197 f->fid = fid;
4198 f->dev = br_dev;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004199
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004200 list_add(&f->list, &mlxsw_sp->vfids.list);
4201 set_bit(vfid, mlxsw_sp->vfids.mapped);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004202
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004203 return f;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004204
4205err_allocate_vfid:
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004206 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004207 return ERR_PTR(-ENOMEM);
4208}
4209
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004210static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
4211 struct mlxsw_sp_fid *f)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004212{
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004213 u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
Ido Schimmel99f44bb2016-07-04 08:23:17 +02004214 u16 fid = f->fid;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004215
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004216 clear_bit(vfid, mlxsw_sp->vfids.mapped);
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004217 list_del(&f->list);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004218
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004219 if (f->rif)
4220 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004221
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004222 kfree(f);
Ido Schimmel99f44bb2016-07-04 08:23:17 +02004223
4224 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004225}
4226
Ido Schimmel99724c12016-07-04 08:23:14 +02004227static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
4228 bool valid)
4229{
4230 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
4231 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4232
4233 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
4234 vid);
4235}
4236
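/* Make a vPort ({port, VID} pair) a member of the vFID representing its
 * VLAN-unaware bridge: find or create the vFID for the bridge device,
 * enable flooding for it and map the {port, VID} to the FID.
 */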
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004237static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4238 struct net_device *br_dev)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004239{
Ido Schimmel0355b592016-06-20 23:04:13 +02004240 struct mlxsw_sp_fid *f;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004241 int err;
4242
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004243 f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
Ido Schimmel0355b592016-06-20 23:04:13 +02004244 if (!f) {
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004245 f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
Ido Schimmel0355b592016-06-20 23:04:13 +02004246 if (IS_ERR(f))
4247 return PTR_ERR(f);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004248 }
4249
Ido Schimmel0355b592016-06-20 23:04:13 +02004250 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
4251 if (err)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004252 goto err_vport_flood_set;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004253
Ido Schimmel0355b592016-06-20 23:04:13 +02004254 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
4255 if (err)
4256 goto err_vport_fid_map;
Ido Schimmel6a9863a2016-02-15 13:19:54 +01004257
Ido Schimmel41b996c2016-06-20 23:04:17 +02004258 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
Ido Schimmel0355b592016-06-20 23:04:13 +02004259 f->ref_count++;
Ido Schimmel039c49a2016-01-27 15:20:18 +01004260
Ido Schimmel22305372016-06-20 23:04:21 +02004261 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
4262
Ido Schimmel0355b592016-06-20 23:04:13 +02004263 return 0;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004264
Ido Schimmel9c4d4422016-06-20 23:04:10 +02004265err_vport_fid_map:
Ido Schimmel0355b592016-06-20 23:04:13 +02004266 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4267err_vport_flood_set:
4268 if (!f->ref_count)
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004269 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
Ido Schimmel0355b592016-06-20 23:04:13 +02004270 return err;
4271}
4272
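/* Editor's note (added comment): detach a vPort from its vFID - undo the
 * {Port, VID} to FID mapping, disable flooding, flush FDB entries learned
 * on the FID and drop the FID reference, destroying the vFID when the last
 * user is gone.
 */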
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004273static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
Ido Schimmel0355b592016-06-20 23:04:13 +02004274{
Ido Schimmel41b996c2016-06-20 23:04:17 +02004275 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
Ido Schimmel0355b592016-06-20 23:04:13 +02004276
Ido Schimmel22305372016-06-20 23:04:21 +02004277 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
4278
Ido Schimmel0355b592016-06-20 23:04:13 +02004279 mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
4280
4281 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4282
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02004283 mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
4284
Ido Schimmel41b996c2016-06-20 23:04:17 +02004285 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
Ido Schimmel0355b592016-06-20 23:04:13 +02004286 if (--f->ref_count == 0)
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004287 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004288}
4289
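/* Editor's note (added comment): called when a VLAN upper of the port is
 * enslaved to a bridge - leave the current FID (if any), join the bridge's
 * vFID, enable learning on the VID and mark the vPort as bridged.
 */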
4290static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4291 struct net_device *br_dev)
4292{
Ido Schimmel99724c12016-07-04 08:23:14 +02004293 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004294 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4295 struct net_device *dev = mlxsw_sp_vport->dev;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004296 int err;
4297
Ido Schimmel99724c12016-07-04 08:23:14 +02004298 if (f && !WARN_ON(!f->leave))
4299 f->leave(mlxsw_sp_vport);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004300
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004301 err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004302 if (err) {
Ido Schimmel0355b592016-06-20 23:04:13 +02004303 netdev_err(dev, "Failed to join vFID\n");
Ido Schimmel99724c12016-07-04 08:23:14 +02004304 return err;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004305 }
4306
4307 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
4308 if (err) {
4309 netdev_err(dev, "Failed to enable learning\n");
4310 goto err_port_vid_learning_set;
4311 }
4312
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004313 mlxsw_sp_vport->learning = 1;
4314 mlxsw_sp_vport->learning_sync = 1;
4315 mlxsw_sp_vport->uc_flood = 1;
Nogah Frankel71c365b2017-02-09 14:54:46 +01004316 mlxsw_sp_vport->mc_flood = 1;
Nogah Frankel8ecd4592017-02-09 14:54:47 +01004317 mlxsw_sp_vport->mc_router = 0;
4318 mlxsw_sp_vport->mc_disabled = 1;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004319 mlxsw_sp_vport->bridged = 1;
4320
4321 return 0;
4322
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004323err_port_vid_learning_set:
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004324 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004325 return err;
4326}
4327
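/* Editor's note (added comment): undo mlxsw_sp_vport_bridge_join() -
 * disable learning on the VID, leave the vFID and clear the vPort's
 * bridge-related flags.
 */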
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02004328static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
Ido Schimmel0355b592016-06-20 23:04:13 +02004329{
4330 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
Ido Schimmel0355b592016-06-20 23:04:13 +02004331
4332 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
4333
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004334 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
Ido Schimmel0355b592016-06-20 23:04:13 +02004335
Ido Schimmel0355b592016-06-20 23:04:13 +02004336 mlxsw_sp_vport->learning = 0;
4337 mlxsw_sp_vport->learning_sync = 0;
4338 mlxsw_sp_vport->uc_flood = 0;
Nogah Frankel71c365b2017-02-09 14:54:46 +01004339 mlxsw_sp_vport->mc_flood = 0;
Nogah Frankel8ecd4592017-02-09 14:54:47 +01004340 mlxsw_sp_vport->mc_router = 0;
Ido Schimmel0355b592016-06-20 23:04:13 +02004341 mlxsw_sp_vport->bridged = 0;
4342}
4343
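/* Editor's note (added comment, semantics inferred from the call site
 * below): return true only if none of the port's existing VLAN uppers is
 * already a member of 'br_dev', enforcing the one-VLAN-interface-per-port-
 * per-bridge rule checked in the PRECHANGEUPPER handler.
 */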
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004344static bool
4345mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
4346 const struct net_device *br_dev)
4347{
4348 struct mlxsw_sp_port *mlxsw_sp_vport;
4349
4350 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
4351 vport.list) {
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004352 struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
Ido Schimmel56918b62016-06-20 23:04:18 +02004353
4354 if (dev && dev == br_dev)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004355 return false;
4356 }
4357
4358 return true;
4359}
4360
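/* Editor's note (added comment): handle [PRE]CHANGEUPPER events for a VLAN
 * upper of a port - only a bridge or an L3 master (VRF) is a valid upper,
 * and joining a bridge that another VLAN upper of the same port already
 * belongs to is vetoed.
 */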
4361static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
4362 unsigned long event, void *ptr,
4363 u16 vid)
4364{
4365 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4366 struct netdev_notifier_changeupper_info *info = ptr;
4367 struct mlxsw_sp_port *mlxsw_sp_vport;
4368 struct net_device *upper_dev;
Ido Schimmel80bedf12016-06-20 23:03:59 +02004369 int err = 0;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004370
4371 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
Ido Schimmel1f880612017-03-10 08:53:35 +01004372 if (!mlxsw_sp_vport)
4373 return 0;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004374
4375 switch (event) {
4376 case NETDEV_PRECHANGEUPPER:
4377 upper_dev = info->upper_dev;
Ido Schimmel7179eb52017-03-16 09:08:18 +01004378 if (!netif_is_bridge_master(upper_dev) &&
4379 !netif_is_l3_master(upper_dev))
Ido Schimmel80bedf12016-06-20 23:03:59 +02004380 return -EINVAL;
Ido Schimmelddbe9932016-06-20 23:04:02 +02004381 if (!info->linking)
4382 break;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004383	/* We can't have multiple VLAN interfaces configured on
4384	 * the same port that are members of the same bridge.
4385	 */
Ido Schimmel7179eb52017-03-16 09:08:18 +01004386 if (netif_is_bridge_master(upper_dev) &&
4387 !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004388 upper_dev))
Ido Schimmel80bedf12016-06-20 23:03:59 +02004389 return -EINVAL;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004390 break;
4391 case NETDEV_CHANGEUPPER:
4392 upper_dev = info->upper_dev;
Ido Schimmel1f880612017-03-10 08:53:35 +01004393 if (netif_is_bridge_master(upper_dev)) {
4394 if (info->linking)
4395 err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
4396 upper_dev);
4397 else
4398 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
Ido Schimmel7179eb52017-03-16 09:08:18 +01004399 } else if (netif_is_l3_master(upper_dev)) {
4400 if (info->linking)
4401 err = mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
4402 else
4403 mlxsw_sp_vport_vrf_leave(mlxsw_sp_vport);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004404 } else {
Ido Schimmel1f880612017-03-10 08:53:35 +01004405 err = -EINVAL;
4406 WARN_ON(1);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004407 }
Ido Schimmel1f880612017-03-10 08:53:35 +01004408 break;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004409 }
4410
Ido Schimmel80bedf12016-06-20 23:03:59 +02004411 return err;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004412}
4413
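/* Editor's note (added comment): VLAN device on top of a LAG - propagate
 * the event to every mlxsw_sp port member of the LAG, aborting on the
 * first error.
 */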
Ido Schimmel272c4472015-12-15 16:03:47 +01004414static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
4415 unsigned long event, void *ptr,
4416 u16 vid)
4417{
4418 struct net_device *dev;
4419 struct list_head *iter;
4420 int ret;
4421
4422 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4423 if (mlxsw_sp_port_dev_check(dev)) {
4424 ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
4425 vid);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004426 if (ret)
Ido Schimmel272c4472015-12-15 16:03:47 +01004427 return ret;
4428 }
4429 }
4430
Ido Schimmel80bedf12016-06-20 23:03:59 +02004431 return 0;
Ido Schimmel272c4472015-12-15 16:03:47 +01004432}
4433
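/* Editor's note (added comment): handle [PRE]CHANGEUPPER events for a VLAN
 * device on top of the VLAN-aware bridge; only a VRF (L3 master) upper is
 * allowed.
 */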
Ido Schimmel3d70e4582017-03-16 09:08:19 +01004434static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
4435 unsigned long event, void *ptr)
4436{
4437 struct netdev_notifier_changeupper_info *info;
4438 struct mlxsw_sp *mlxsw_sp;
4439 int err = 0;
4440
4441 mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
4442 if (!mlxsw_sp)
4443 return 0;
4444
4445 info = ptr;
4446
4447 switch (event) {
4448 case NETDEV_PRECHANGEUPPER:
4449 /* VLAN devices are only allowed on top of the
4450 * VLAN-aware bridge.
4451 */
4452 if (WARN_ON(vlan_dev_real_dev(vlan_dev) !=
4453 mlxsw_sp->master_bridge.dev))
4454 return -EINVAL;
4455 if (!netif_is_l3_master(info->upper_dev))
4456 return -EINVAL;
4457 break;
4458 case NETDEV_CHANGEUPPER:
4459 if (netif_is_l3_master(info->upper_dev)) {
4460 if (info->linking)
4461 err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
4462 vlan_dev);
4463 else
4464 mlxsw_sp_bridge_vrf_leave(mlxsw_sp, vlan_dev);
4465 } else {
4466 err = -EINVAL;
4467 WARN_ON(1);
4468 }
4469 break;
4470 }
4471
4472 return err;
4473}
4474
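/* Editor's note (added comment): dispatch events for a VLAN device
 * according to its real device - physical port, LAG or bridge master.
 */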
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004475static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4476 unsigned long event, void *ptr)
4477{
4478 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4479 u16 vid = vlan_dev_vlan_id(vlan_dev);
4480
Ido Schimmel272c4472015-12-15 16:03:47 +01004481 if (mlxsw_sp_port_dev_check(real_dev))
4482 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
4483 vid);
4484 else if (netif_is_lag_master(real_dev))
4485 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
4486 vid);
Ido Schimmel3d70e4582017-03-16 09:08:19 +01004487 else if (netif_is_bridge_master(real_dev))
4488 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, event,
4489 ptr);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004490
Ido Schimmel80bedf12016-06-20 23:03:59 +02004491 return 0;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004492}
4493
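/* Editor's note (added comment): top-level netdevice notifier - address
 * and MTU changes are handed to the router, everything else is dispatched
 * by device type (port, LAG, bridge or VLAN device).
 */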
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004494static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4495 unsigned long event, void *ptr)
4496{
4497 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004498 int err = 0;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004499
Ido Schimmel6e095fd2016-07-04 08:23:13 +02004500 if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
4501 err = mlxsw_sp_netdevice_router_port_event(dev);
4502 else if (mlxsw_sp_port_dev_check(dev))
Ido Schimmel80bedf12016-06-20 23:03:59 +02004503 err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
4504 else if (netif_is_lag_master(dev))
4505 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
Ido Schimmel701b1862016-07-04 08:23:16 +02004506 else if (netif_is_bridge_master(dev))
4507 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004508 else if (is_vlan_dev(dev))
4509 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004510
Ido Schimmel80bedf12016-06-20 23:03:59 +02004511 return notifier_from_errno(err);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004512}
4513
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004514static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
4515 .notifier_call = mlxsw_sp_netdevice_event,
4516};
4517
Ido Schimmel99724c12016-07-04 08:23:14 +02004518static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
4519 .notifier_call = mlxsw_sp_inetaddr_event,
4520 .priority = 10, /* Must be called before FIB notifier block */
4521};
4522
Jiri Pirkoe7322632016-09-01 10:37:43 +02004523static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
4524 .notifier_call = mlxsw_sp_router_netevent_event,
4525};
4526
Jiri Pirko1d20d232016-10-27 15:12:59 +02004527static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
4528 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
4529 {0, },
4530};
4531
4532static struct pci_driver mlxsw_sp_pci_driver = {
4533 .name = mlxsw_sp_driver_name,
4534 .id_table = mlxsw_sp_pci_id_table,
4535};
4536
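/* Editor's note (added comment): the netdevice, inetaddr and netevent
 * notifiers are registered before the core and PCI drivers; on error the
 * steps are unwound in reverse order.
 */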
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004537static int __init mlxsw_sp_module_init(void)
4538{
4539 int err;
4540
4541 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
Ido Schimmel99724c12016-07-04 08:23:14 +02004542 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
Jiri Pirkoe7322632016-09-01 10:37:43 +02004543 register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4544
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004545 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4546 if (err)
4547 goto err_core_driver_register;
Jiri Pirko1d20d232016-10-27 15:12:59 +02004548
4549 err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
4550 if (err)
4551 goto err_pci_driver_register;
4552
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004553 return 0;
4554
Jiri Pirko1d20d232016-10-27 15:12:59 +02004555err_pci_driver_register:
4556 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004557err_core_driver_register:
Jiri Pirkoe7322632016-09-01 10:37:43 +02004558 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
Jiri Pirkode7d6292016-09-01 10:37:42 +02004559 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004560 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4561 return err;
4562}
4563
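/* Editor's note (added comment): tear down in the reverse order of
 * mlxsw_sp_module_init().
 */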
4564static void __exit mlxsw_sp_module_exit(void)
4565{
Jiri Pirko1d20d232016-10-27 15:12:59 +02004566 mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004567 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
Jiri Pirkoe7322632016-09-01 10:37:43 +02004568 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
Ido Schimmel99724c12016-07-04 08:23:14 +02004569 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02004570 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4571}
4572
4573module_init(mlxsw_sp_module_init);
4574module_exit(mlxsw_sp_module_exit);
4575
4576MODULE_LICENSE("Dual BSD/GPL");
4577MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4578MODULE_DESCRIPTION("Mellanox Spectrum driver");
Jiri Pirko1d20d232016-10-27 15:12:59 +02004579MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);