blob: af430484ed2f6287509a6f179814d60a00f7d530 [file] [log] [blame]
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
Jiri Pirko22a67762017-02-03 10:29:07 +01003 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
Jiri Pirko56ade8f2015-10-16 14:01:37 +02005 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/types.h>
Jiri Pirko1d20d232016-10-27 15:12:59 +020040#include <linux/pci.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020041#include <linux/netdevice.h>
42#include <linux/etherdevice.h>
43#include <linux/ethtool.h>
44#include <linux/slab.h>
45#include <linux/device.h>
46#include <linux/skbuff.h>
47#include <linux/if_vlan.h>
48#include <linux/if_bridge.h>
49#include <linux/workqueue.h>
50#include <linux/jiffies.h>
51#include <linux/bitops.h>
Ido Schimmel7f71eb42015-12-15 16:03:37 +010052#include <linux/list.h>
Ido Schimmel80bedf12016-06-20 23:03:59 +020053#include <linux/notifier.h>
Ido Schimmel90183b92016-04-06 17:10:08 +020054#include <linux/dcbnl.h>
Ido Schimmel99724c12016-07-04 08:23:14 +020055#include <linux/inetdevice.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020056#include <net/switchdev.h>
Yotam Gigi763b4b72016-07-21 12:03:17 +020057#include <net/pkt_cls.h>
58#include <net/tc_act/tc_mirred.h>
Jiri Pirkoe7322632016-09-01 10:37:43 +020059#include <net/netevent.h>
Yotam Gigi98d0f7b2017-01-23 11:07:11 +010060#include <net/tc_act/tc_sample.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020061
62#include "spectrum.h"
Jiri Pirko1d20d232016-10-27 15:12:59 +020063#include "pci.h"
Jiri Pirko56ade8f2015-10-16 14:01:37 +020064#include "core.h"
65#include "reg.h"
66#include "port.h"
67#include "trap.h"
68#include "txheader.h"
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +010069#include "spectrum_cnt.h"
Jiri Pirko56ade8f2015-10-16 14:01:37 +020070
71static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
72static const char mlxsw_sp_driver_version[] = "1.0";
73
74/* tx_hdr_version
75 * Tx header version.
76 * Must be set to 1.
77 */
78MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
79
80/* tx_hdr_ctl
81 * Packet control type.
82 * 0 - Ethernet control (e.g. EMADs, LACP)
83 * 1 - Ethernet data
84 */
85MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
86
87/* tx_hdr_proto
88 * Packet protocol type. Must be set to 1 (Ethernet).
89 */
90MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
91
92/* tx_hdr_rx_is_router
93 * Packet is sent from the router. Valid for data packets only.
94 */
95MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
96
97/* tx_hdr_fid_valid
98 * Indicates if the 'fid' field is valid and should be used for
99 * forwarding lookup. Valid for data packets only.
100 */
101MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
102
103/* tx_hdr_swid
104 * Switch partition ID. Must be set to 0.
105 */
106MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
107
108/* tx_hdr_control_tclass
109 * Indicates if the packet should use the control TClass and not one
110 * of the data TClasses.
111 */
112MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
113
114/* tx_hdr_etclass
115 * Egress TClass to be used on the egress device on the egress port.
116 */
117MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
118
119/* tx_hdr_port_mid
120 * Destination local port for unicast packets.
121 * Destination multicast ID for multicast packets.
122 *
123 * Control packets are directed to a specific egress port, while data
124 * packets are transmitted through the CPU port (0) into the switch partition,
125 * where forwarding rules are applied.
126 */
127MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
128
129/* tx_hdr_fid
130 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
131 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
132 * Valid for data packets only.
133 */
134MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
135
136/* tx_hdr_type
137 * 0 - Data packets
138 * 6 - Control packets
139 */
140MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
141
Arkadi Sharshevsky1abcbcc2017-03-11 09:42:53 +0100142int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
143 unsigned int counter_index, u64 *packets,
144 u64 *bytes)
145{
146 char mgpc_pl[MLXSW_REG_MGPC_LEN];
147 int err;
148
149 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
150 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
151 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
152 if (err)
153 return err;
154 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
155 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
156 return 0;
157}
158
159static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
160 unsigned int counter_index)
161{
162 char mgpc_pl[MLXSW_REG_MGPC_LEN];
163
164 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
165 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
166 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
167}
168
169int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
170 unsigned int *p_counter_index)
171{
172 int err;
173
174 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
175 p_counter_index);
176 if (err)
177 return err;
178 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
179 if (err)
180 goto err_counter_clear;
181 return 0;
182
183err_counter_clear:
184 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
185 *p_counter_index);
186 return err;
187}
188
189void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
190 unsigned int counter_index)
191{
192 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
193 counter_index);
194}
195
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200196static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
197 const struct mlxsw_tx_info *tx_info)
198{
199 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
200
201 memset(txhdr, 0, MLXSW_TXHDR_LEN);
202
203 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
204 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
205 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
206 mlxsw_tx_hdr_swid_set(txhdr, 0);
207 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
208 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
209 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
210}
211
212static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
213{
Elad Raz5b090742016-10-28 21:35:46 +0200214 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200215 int err;
216
217 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
218 if (err)
219 return err;
220 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
221 return 0;
222}
223
Yotam Gigi763b4b72016-07-21 12:03:17 +0200224static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
225{
Yotam Gigi763b4b72016-07-21 12:03:17 +0200226 int i;
227
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200228 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
Yotam Gigi763b4b72016-07-21 12:03:17 +0200229 return -EIO;
230
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200231 mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
232 MAX_SPAN);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200233 mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
234 sizeof(struct mlxsw_sp_span_entry),
235 GFP_KERNEL);
236 if (!mlxsw_sp->span.entries)
237 return -ENOMEM;
238
239 for (i = 0; i < mlxsw_sp->span.entries_count; i++)
240 INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
241
242 return 0;
243}
244
245static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
246{
247 int i;
248
249 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
250 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
251
252 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
253 }
254 kfree(mlxsw_sp->span.entries);
255}
256
257static struct mlxsw_sp_span_entry *
258mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
259{
260 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
261 struct mlxsw_sp_span_entry *span_entry;
262 char mpat_pl[MLXSW_REG_MPAT_LEN];
263 u8 local_port = port->local_port;
264 int index;
265 int i;
266 int err;
267
268 /* find a free entry to use */
269 index = -1;
270 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
271 if (!mlxsw_sp->span.entries[i].used) {
272 index = i;
273 span_entry = &mlxsw_sp->span.entries[i];
274 break;
275 }
276 }
277 if (index < 0)
278 return NULL;
279
280 /* create a new port analayzer entry for local_port */
281 mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
282 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
283 if (err)
284 return NULL;
285
286 span_entry->used = true;
287 span_entry->id = index;
Yotam Gigi2d644d42016-11-11 16:34:25 +0100288 span_entry->ref_count = 1;
Yotam Gigi763b4b72016-07-21 12:03:17 +0200289 span_entry->local_port = local_port;
290 return span_entry;
291}
292
293static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
294 struct mlxsw_sp_span_entry *span_entry)
295{
296 u8 local_port = span_entry->local_port;
297 char mpat_pl[MLXSW_REG_MPAT_LEN];
298 int pa_id = span_entry->id;
299
300 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
301 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
302 span_entry->used = false;
303}
304
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200305static struct mlxsw_sp_span_entry *
306mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
Yotam Gigi763b4b72016-07-21 12:03:17 +0200307{
308 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
309 int i;
310
311 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
312 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
313
314 if (curr->used && curr->local_port == port->local_port)
315 return curr;
316 }
317 return NULL;
318}
319
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200320static struct mlxsw_sp_span_entry
321*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
Yotam Gigi763b4b72016-07-21 12:03:17 +0200322{
323 struct mlxsw_sp_span_entry *span_entry;
324
325 span_entry = mlxsw_sp_span_entry_find(port);
326 if (span_entry) {
Yotam Gigi2d644d42016-11-11 16:34:25 +0100327 /* Already exists, just take a reference */
Yotam Gigi763b4b72016-07-21 12:03:17 +0200328 span_entry->ref_count++;
329 return span_entry;
330 }
331
332 return mlxsw_sp_span_entry_create(port);
333}
334
335static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
336 struct mlxsw_sp_span_entry *span_entry)
337{
Yotam Gigi2d644d42016-11-11 16:34:25 +0100338 WARN_ON(!span_entry->ref_count);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200339 if (--span_entry->ref_count == 0)
340 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
341 return 0;
342}
343
344static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
345{
346 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
347 struct mlxsw_sp_span_inspected_port *p;
348 int i;
349
350 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
351 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
352
353 list_for_each_entry(p, &curr->bound_ports_list, list)
354 if (p->local_port == port->local_port &&
355 p->type == MLXSW_SP_SPAN_EGRESS)
356 return true;
357 }
358
359 return false;
360}
361
362static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
363{
364 return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
365}
366
367static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
368{
369 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
370 char sbib_pl[MLXSW_REG_SBIB_LEN];
371 int err;
372
373 /* If port is egress mirrored, the shared buffer size should be
374 * updated according to the mtu value
375 */
376 if (mlxsw_sp_span_is_egress_mirror(port)) {
377 mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
378 mlxsw_sp_span_mtu_to_buffsize(mtu));
379 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
380 if (err) {
381 netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
382 return err;
383 }
384 }
385
386 return 0;
387}
388
389static struct mlxsw_sp_span_inspected_port *
390mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
391 struct mlxsw_sp_span_entry *span_entry)
392{
393 struct mlxsw_sp_span_inspected_port *p;
394
395 list_for_each_entry(p, &span_entry->bound_ports_list, list)
396 if (port->local_port == p->local_port)
397 return p;
398 return NULL;
399}
400
401static int
402mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
403 struct mlxsw_sp_span_entry *span_entry,
404 enum mlxsw_sp_span_type type)
405{
406 struct mlxsw_sp_span_inspected_port *inspected_port;
407 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
408 char mpar_pl[MLXSW_REG_MPAR_LEN];
409 char sbib_pl[MLXSW_REG_SBIB_LEN];
410 int pa_id = span_entry->id;
411 int err;
412
413 /* if it is an egress SPAN, bind a shared buffer to it */
414 if (type == MLXSW_SP_SPAN_EGRESS) {
415 mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
416 mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
417 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
418 if (err) {
419 netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
420 return err;
421 }
422 }
423
424 /* bind the port to the SPAN entry */
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200425 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
426 (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200427 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
428 if (err)
429 goto err_mpar_reg_write;
430
431 inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
432 if (!inspected_port) {
433 err = -ENOMEM;
434 goto err_inspected_port_alloc;
435 }
436 inspected_port->local_port = port->local_port;
437 inspected_port->type = type;
438 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
439
440 return 0;
441
442err_mpar_reg_write:
443err_inspected_port_alloc:
444 if (type == MLXSW_SP_SPAN_EGRESS) {
445 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
446 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
447 }
448 return err;
449}
450
451static void
452mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
453 struct mlxsw_sp_span_entry *span_entry,
454 enum mlxsw_sp_span_type type)
455{
456 struct mlxsw_sp_span_inspected_port *inspected_port;
457 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
458 char mpar_pl[MLXSW_REG_MPAR_LEN];
459 char sbib_pl[MLXSW_REG_SBIB_LEN];
460 int pa_id = span_entry->id;
461
462 inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
463 if (!inspected_port)
464 return;
465
466 /* remove the inspected port */
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200467 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
468 (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200469 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
470
471 /* remove the SBIB buffer if it was egress SPAN */
472 if (type == MLXSW_SP_SPAN_EGRESS) {
473 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
474 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
475 }
476
477 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
478
479 list_del(&inspected_port->list);
480 kfree(inspected_port);
481}
482
483static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
484 struct mlxsw_sp_port *to,
485 enum mlxsw_sp_span_type type)
486{
487 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
488 struct mlxsw_sp_span_entry *span_entry;
489 int err;
490
491 span_entry = mlxsw_sp_span_entry_get(to);
492 if (!span_entry)
493 return -ENOENT;
494
495 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
496 span_entry->id);
497
498 err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
499 if (err)
500 goto err_port_bind;
501
502 return 0;
503
504err_port_bind:
505 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
506 return err;
507}
508
509static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
510 struct mlxsw_sp_port *to,
511 enum mlxsw_sp_span_type type)
512{
513 struct mlxsw_sp_span_entry *span_entry;
514
515 span_entry = mlxsw_sp_span_entry_find(to);
516 if (!span_entry) {
517 netdev_err(from->dev, "no span entry found\n");
518 return;
519 }
520
521 netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
522 span_entry->id);
523 mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
524}
525
Yotam Gigi98d0f7b2017-01-23 11:07:11 +0100526static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
527 bool enable, u32 rate)
528{
529 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
530 char mpsc_pl[MLXSW_REG_MPSC_LEN];
531
532 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
533 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
534}
535
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200536static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
537 bool is_up)
538{
539 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
540 char paos_pl[MLXSW_REG_PAOS_LEN];
541
542 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
543 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
544 MLXSW_PORT_ADMIN_STATUS_DOWN);
545 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
546}
547
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200548static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
549 unsigned char *addr)
550{
551 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
552 char ppad_pl[MLXSW_REG_PPAD_LEN];
553
554 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
555 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
556 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
557}
558
559static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
560{
561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
562 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
563
564 ether_addr_copy(addr, mlxsw_sp->base_mac);
565 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
566 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
567}
568
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200569static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
570{
571 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
572 char pmtu_pl[MLXSW_REG_PMTU_LEN];
573 int max_mtu;
574 int err;
575
576 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
577 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
578 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
579 if (err)
580 return err;
581 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
582
583 if (mtu > max_mtu)
584 return -EINVAL;
585
586 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
587 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
588}
589
Ido Schimmelbe945352016-06-09 09:51:39 +0200590static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
591 u8 swid)
592{
593 char pspa_pl[MLXSW_REG_PSPA_LEN];
594
595 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
596 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
597}
598
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200599static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
600{
601 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200602
Ido Schimmelbe945352016-06-09 09:51:39 +0200603 return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
604 swid);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200605}
606
607static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
608 bool enable)
609{
610 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
611 char svpe_pl[MLXSW_REG_SVPE_LEN];
612
613 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
614 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
615}
616
617int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
618 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
619 u16 vid)
620{
621 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
622 char svfa_pl[MLXSW_REG_SVFA_LEN];
623
624 mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
625 fid, vid);
626 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
627}
628
Ido Schimmel584d73d2016-08-24 12:00:26 +0200629int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
630 u16 vid_begin, u16 vid_end,
631 bool learn_enable)
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200632{
633 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
634 char *spvmlr_pl;
635 int err;
636
637 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
638 if (!spvmlr_pl)
639 return -ENOMEM;
Ido Schimmel584d73d2016-08-24 12:00:26 +0200640 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
641 vid_end, learn_enable);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200642 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
643 kfree(spvmlr_pl);
644 return err;
645}
646
Ido Schimmel584d73d2016-08-24 12:00:26 +0200647static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
648 u16 vid, bool learn_enable)
649{
650 return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
651 learn_enable);
652}
653
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200654static int
655mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
656{
657 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
658 char sspr_pl[MLXSW_REG_SSPR_LEN];
659
660 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
661 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
662}
663
Ido Schimmeld664b412016-06-09 09:51:40 +0200664static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
665 u8 local_port, u8 *p_module,
666 u8 *p_width, u8 *p_lane)
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200667{
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200668 char pmlp_pl[MLXSW_REG_PMLP_LEN];
669 int err;
670
Ido Schimmel558c2d52016-02-26 17:32:29 +0100671 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200672 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
673 if (err)
674 return err;
Ido Schimmel558c2d52016-02-26 17:32:29 +0100675 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
676 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
Ido Schimmel2bf9a582016-04-05 10:20:04 +0200677 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200678 return 0;
679}
680
Ido Schimmel18f1e702016-02-26 17:32:31 +0100681static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
682 u8 module, u8 width, u8 lane)
683{
684 char pmlp_pl[MLXSW_REG_PMLP_LEN];
685 int i;
686
687 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
688 mlxsw_reg_pmlp_width_set(pmlp_pl, width);
689 for (i = 0; i < width; i++) {
690 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
691 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
692 }
693
694 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
695}
696
Ido Schimmel3e9b27b2016-02-26 17:32:28 +0100697static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
698{
699 char pmlp_pl[MLXSW_REG_PMLP_LEN];
700
701 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
702 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
703 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
704}
705
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200706static int mlxsw_sp_port_open(struct net_device *dev)
707{
708 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
709 int err;
710
711 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
712 if (err)
713 return err;
714 netif_start_queue(dev);
715 return 0;
716}
717
718static int mlxsw_sp_port_stop(struct net_device *dev)
719{
720 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
721
722 netif_stop_queue(dev);
723 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
724}
725
726static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
727 struct net_device *dev)
728{
729 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
730 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
731 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
732 const struct mlxsw_tx_info tx_info = {
733 .local_port = mlxsw_sp_port->local_port,
734 .is_emad = false,
735 };
736 u64 len;
737 int err;
738
Jiri Pirko307c2432016-04-08 19:11:22 +0200739 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200740 return NETDEV_TX_BUSY;
741
742 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
743 struct sk_buff *skb_orig = skb;
744
745 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
746 if (!skb) {
747 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
748 dev_kfree_skb_any(skb_orig);
749 return NETDEV_TX_OK;
750 }
Arkadi Sharshevsky36bf38d2017-01-12 09:10:37 +0100751 dev_consume_skb_any(skb_orig);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200752 }
753
754 if (eth_skb_pad(skb)) {
755 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
756 return NETDEV_TX_OK;
757 }
758
759 mlxsw_sp_txhdr_construct(skb, &tx_info);
Nogah Frankel63dcdd32016-06-17 15:09:05 +0200760 /* TX header is consumed by HW on the way so we shouldn't count its
761 * bytes as being sent.
762 */
763 len = skb->len - MLXSW_TXHDR_LEN;
764
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200765 /* Due to a race we might fail here because of a full queue. In that
766 * unlikely case we simply drop the packet.
767 */
Jiri Pirko307c2432016-04-08 19:11:22 +0200768 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200769
770 if (!err) {
771 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
772 u64_stats_update_begin(&pcpu_stats->syncp);
773 pcpu_stats->tx_packets++;
774 pcpu_stats->tx_bytes += len;
775 u64_stats_update_end(&pcpu_stats->syncp);
776 } else {
777 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
778 dev_kfree_skb_any(skb);
779 }
780 return NETDEV_TX_OK;
781}
782
Jiri Pirkoc5b9b512015-12-03 12:12:22 +0100783static void mlxsw_sp_set_rx_mode(struct net_device *dev)
784{
785}
786
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200787static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
788{
789 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
790 struct sockaddr *addr = p;
791 int err;
792
793 if (!is_valid_ether_addr(addr->sa_data))
794 return -EADDRNOTAVAIL;
795
796 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
797 if (err)
798 return err;
799 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
800 return 0;
801}
802
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200803static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200804 bool pause_en, bool pfc_en, u16 delay)
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200805{
806 u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
807
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200808 delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
809 MLXSW_SP_PAUSE_DELAY;
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200810
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200811 if (pause_en || pfc_en)
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200812 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200813 pg_size + delay, pg_size);
814 else
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200815 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200816}
817
818int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200819 u8 *prio_tc, bool pause_en,
820 struct ieee_pfc *my_pfc)
Ido Schimmelff6551e2016-04-06 17:10:03 +0200821{
822 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200823 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
824 u16 delay = !!my_pfc ? my_pfc->delay : 0;
Ido Schimmelff6551e2016-04-06 17:10:03 +0200825 char pbmc_pl[MLXSW_REG_PBMC_LEN];
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200826 int i, j, err;
Ido Schimmelff6551e2016-04-06 17:10:03 +0200827
828 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
829 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
830 if (err)
831 return err;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200832
833 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
834 bool configure = false;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200835 bool pfc = false;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200836
837 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
838 if (prio_tc[j] == i) {
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200839 pfc = pfc_en & BIT(j);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200840 configure = true;
841 break;
842 }
843 }
844
845 if (!configure)
846 continue;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200847 mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200848 }
849
Ido Schimmelff6551e2016-04-06 17:10:03 +0200850 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
851}
852
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200853static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200854 int mtu, bool pause_en)
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200855{
856 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
857 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200858 struct ieee_pfc *my_pfc;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200859 u8 *prio_tc;
860
861 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200862 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200863
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200864 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200865 pause_en, my_pfc);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200866}
867
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200868static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
869{
870 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200871 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200872 int err;
873
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200874 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200875 if (err)
876 return err;
Yotam Gigi763b4b72016-07-21 12:03:17 +0200877 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
878 if (err)
879 goto err_span_port_mtu_update;
Ido Schimmelff6551e2016-04-06 17:10:03 +0200880 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
881 if (err)
882 goto err_port_mtu_set;
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200883 dev->mtu = mtu;
884 return 0;
Ido Schimmelff6551e2016-04-06 17:10:03 +0200885
886err_port_mtu_set:
Yotam Gigi763b4b72016-07-21 12:03:17 +0200887 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
888err_span_port_mtu_update:
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200889 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
Ido Schimmelff6551e2016-04-06 17:10:03 +0200890 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200891}
892
Or Gerlitz4bdcc6c2016-09-20 08:14:08 +0300893static int
Nogah Frankelfc1bbb02016-09-16 15:05:38 +0200894mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
895 struct rtnl_link_stats64 *stats)
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200896{
897 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
898 struct mlxsw_sp_port_pcpu_stats *p;
899 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
900 u32 tx_dropped = 0;
901 unsigned int start;
902 int i;
903
904 for_each_possible_cpu(i) {
905 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
906 do {
907 start = u64_stats_fetch_begin_irq(&p->syncp);
908 rx_packets = p->rx_packets;
909 rx_bytes = p->rx_bytes;
910 tx_packets = p->tx_packets;
911 tx_bytes = p->tx_bytes;
912 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
913
914 stats->rx_packets += rx_packets;
915 stats->rx_bytes += rx_bytes;
916 stats->tx_packets += tx_packets;
917 stats->tx_bytes += tx_bytes;
918 /* tx_dropped is u32, updated without syncp protection. */
919 tx_dropped += p->tx_dropped;
920 }
921 stats->tx_dropped = tx_dropped;
Nogah Frankelfc1bbb02016-09-16 15:05:38 +0200922 return 0;
923}
924
Or Gerlitz3df5b3c2016-11-22 23:09:54 +0200925static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
Nogah Frankelfc1bbb02016-09-16 15:05:38 +0200926{
927 switch (attr_id) {
928 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
929 return true;
930 }
931
932 return false;
933}
934
Or Gerlitz4bdcc6c2016-09-20 08:14:08 +0300935static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
936 void *sp)
Nogah Frankelfc1bbb02016-09-16 15:05:38 +0200937{
938 switch (attr_id) {
939 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
940 return mlxsw_sp_port_get_sw_stats64(dev, sp);
941 }
942
943 return -EINVAL;
944}
945
946static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
947 int prio, char *ppcnt_pl)
948{
949 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
950 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
951
952 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
953 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
954}
955
956static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
957 struct rtnl_link_stats64 *stats)
958{
959 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
960 int err;
961
962 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
963 0, ppcnt_pl);
964 if (err)
965 goto out;
966
967 stats->tx_packets =
968 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
969 stats->rx_packets =
970 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
971 stats->tx_bytes =
972 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
973 stats->rx_bytes =
974 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
975 stats->multicast =
976 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
977
978 stats->rx_crc_errors =
979 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
980 stats->rx_frame_errors =
981 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
982
983 stats->rx_length_errors = (
984 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
985 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
986 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
987
988 stats->rx_errors = (stats->rx_crc_errors +
989 stats->rx_frame_errors + stats->rx_length_errors);
990
991out:
992 return err;
993}
994
995static void update_stats_cache(struct work_struct *work)
996{
997 struct mlxsw_sp_port *mlxsw_sp_port =
998 container_of(work, struct mlxsw_sp_port,
999 hw_stats.update_dw.work);
1000
1001 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1002 goto out;
1003
1004 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1005 mlxsw_sp_port->hw_stats.cache);
1006
1007out:
1008 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1009 MLXSW_HW_STATS_UPDATE_TIME);
1010}
1011
1012/* Return the stats from a cache that is updated periodically,
1013 * as this function might get called in an atomic context.
1014 */
stephen hemmingerbc1f4472017-01-06 19:12:52 -08001015static void
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001016mlxsw_sp_port_get_stats64(struct net_device *dev,
1017 struct rtnl_link_stats64 *stats)
1018{
1019 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1020
1021 memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001022}
1023
1024int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1025 u16 vid_end, bool is_member, bool untagged)
1026{
1027 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1028 char *spvm_pl;
1029 int err;
1030
1031 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1032 if (!spvm_pl)
1033 return -ENOMEM;
1034
1035 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1036 vid_end, is_member, untagged);
1037 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1038 kfree(spvm_pl);
1039 return err;
1040}
1041
1042static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
1043{
1044 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
1045 u16 vid, last_visited_vid;
1046 int err;
1047
1048 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1049 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
1050 vid);
1051 if (err) {
1052 last_visited_vid = vid;
1053 goto err_port_vid_to_fid_set;
1054 }
1055 }
1056
1057 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
1058 if (err) {
1059 last_visited_vid = VLAN_N_VID;
1060 goto err_port_vid_to_fid_set;
1061 }
1062
1063 return 0;
1064
1065err_port_vid_to_fid_set:
1066 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
1067 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
1068 vid);
1069 return err;
1070}
1071
1072static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
1073{
1074 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
1075 u16 vid;
1076 int err;
1077
1078 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
1079 if (err)
1080 return err;
1081
1082 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1083 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
1084 vid, vid);
1085 if (err)
1086 return err;
1087 }
1088
1089 return 0;
1090}
1091
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001092static struct mlxsw_sp_port *
Ido Schimmel0355b592016-06-20 23:04:13 +02001093mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001094{
1095 struct mlxsw_sp_port *mlxsw_sp_vport;
1096
1097 mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
1098 if (!mlxsw_sp_vport)
1099 return NULL;
1100
1101 /* dev will be set correctly after the VLAN device is linked
1102 * with the real device. In case of bridge SELF invocation, dev
1103 * will remain as is.
1104 */
1105 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
1106 mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1107 mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
1108 mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
Ido Schimmel272c4472015-12-15 16:03:47 +01001109 mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
1110 mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
Ido Schimmel0355b592016-06-20 23:04:13 +02001111 mlxsw_sp_vport->vport.vid = vid;
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001112
1113 list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
1114
1115 return mlxsw_sp_vport;
1116}
1117
1118static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
1119{
1120 list_del(&mlxsw_sp_vport->vport.list);
1121 kfree(mlxsw_sp_vport);
1122}
1123
Ido Schimmel05978482016-08-17 16:39:30 +02001124static int mlxsw_sp_port_add_vid(struct net_device *dev,
1125 __be16 __always_unused proto, u16 vid)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001126{
1127 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001128 struct mlxsw_sp_port *mlxsw_sp_vport;
Ido Schimmel52697a92016-07-02 11:00:09 +02001129 bool untagged = vid == 1;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001130 int err;
1131
1132 /* VLAN 0 is added to HW filter when device goes up, but it is
1133 * reserved in our case, so simply return.
1134 */
1135 if (!vid)
1136 return 0;
1137
Ido Schimmelfa66d7e2016-08-17 16:39:29 +02001138 if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001139 return 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001140
Ido Schimmel0355b592016-06-20 23:04:13 +02001141 mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
Ido Schimmelfa66d7e2016-08-17 16:39:29 +02001142 if (!mlxsw_sp_vport)
Ido Schimmel0355b592016-06-20 23:04:13 +02001143 return -ENOMEM;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001144
1145 /* When adding the first VLAN interface on a bridged port we need to
1146 * transition all the active 802.1Q bridge VLANs to use explicit
1147 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
1148 */
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001149 if (list_is_singular(&mlxsw_sp_port->vports_list)) {
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001150 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
Ido Schimmelfa66d7e2016-08-17 16:39:29 +02001151 if (err)
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001152 goto err_port_vp_mode_trans;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001153 }
1154
Ido Schimmel52697a92016-07-02 11:00:09 +02001155 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
Ido Schimmelfa66d7e2016-08-17 16:39:29 +02001156 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001157 goto err_port_add_vid;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001158
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001159 return 0;
1160
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001161err_port_add_vid:
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001162 if (list_is_singular(&mlxsw_sp_port->vports_list))
1163 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
1164err_port_vp_mode_trans:
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001165 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001166 return err;
1167}
1168
Ido Schimmel32d863f2016-07-02 11:00:10 +02001169static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1170 __be16 __always_unused proto, u16 vid)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001171{
1172 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001173 struct mlxsw_sp_port *mlxsw_sp_vport;
Ido Schimmel1c800752016-06-20 23:04:20 +02001174 struct mlxsw_sp_fid *f;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001175
1176 /* VLAN 0 is removed from HW filter when device goes down, but
1177 * it is reserved in our case, so simply return.
1178 */
1179 if (!vid)
1180 return 0;
1181
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001182 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
Ido Schimmel7a355832016-08-17 16:39:28 +02001183 if (WARN_ON(!mlxsw_sp_vport))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001184 return 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001185
Ido Schimmel7a355832016-08-17 16:39:28 +02001186 mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001187
Ido Schimmel1c800752016-06-20 23:04:20 +02001188 /* Drop FID reference. If this was the last reference the
1189 * resources will be freed.
1190 */
1191 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
1192 if (f && !WARN_ON(!f->leave))
1193 f->leave(mlxsw_sp_vport);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001194
1195 /* When removing the last VLAN interface on a bridged port we need to
1196 * transition all active 802.1Q bridge VLANs to use VID to FID
1197 * mappings and set port's mode to VLAN mode.
1198 */
Ido Schimmel7a355832016-08-17 16:39:28 +02001199 if (list_is_singular(&mlxsw_sp_port->vports_list))
1200 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001201
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001202 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
1203
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001204 return 0;
1205}
1206
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001207static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1208 size_t len)
1209{
1210 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmeld664b412016-06-09 09:51:40 +02001211 u8 module = mlxsw_sp_port->mapping.module;
1212 u8 width = mlxsw_sp_port->mapping.width;
1213 u8 lane = mlxsw_sp_port->mapping.lane;
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001214 int err;
1215
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001216 if (!mlxsw_sp_port->split)
1217 err = snprintf(name, len, "p%d", module + 1);
1218 else
1219 err = snprintf(name, len, "p%ds%d", module + 1,
1220 lane / width);
1221
1222 if (err >= len)
1223 return -EINVAL;
1224
1225 return 0;
1226}
1227
Yotam Gigi763b4b72016-07-21 12:03:17 +02001228static struct mlxsw_sp_port_mall_tc_entry *
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001229mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1230 unsigned long cookie) {
Yotam Gigi763b4b72016-07-21 12:03:17 +02001231 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1232
1233 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1234 if (mall_tc_entry->cookie == cookie)
1235 return mall_tc_entry;
1236
1237 return NULL;
1238}
1239
1240static int
1241mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001242 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001243 const struct tc_action *a,
1244 bool ingress)
1245{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001246 struct net *net = dev_net(mlxsw_sp_port->dev);
1247 enum mlxsw_sp_span_type span_type;
1248 struct mlxsw_sp_port *to_port;
1249 struct net_device *to_dev;
1250 int ifindex;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001251
1252 ifindex = tcf_mirred_ifindex(a);
1253 to_dev = __dev_get_by_index(net, ifindex);
1254 if (!to_dev) {
1255 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1256 return -EINVAL;
1257 }
1258
1259 if (!mlxsw_sp_port_dev_check(to_dev)) {
1260 netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
Yotam Gigie915ac62017-01-09 11:25:48 +01001261 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001262 }
1263 to_port = netdev_priv(to_dev);
1264
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001265 mirror->to_local_port = to_port->local_port;
1266 mirror->ingress = ingress;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001267 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001268 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
1269}
Yotam Gigi763b4b72016-07-21 12:03:17 +02001270
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001271static void
1272mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1273 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1274{
1275 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1276 enum mlxsw_sp_span_type span_type;
1277 struct mlxsw_sp_port *to_port;
1278
1279 to_port = mlxsw_sp->ports[mirror->to_local_port];
1280 span_type = mirror->ingress ?
1281 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1282 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001283}
1284
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001285static int
1286mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1287 struct tc_cls_matchall_offload *cls,
1288 const struct tc_action *a,
1289 bool ingress)
1290{
1291 int err;
1292
1293 if (!mlxsw_sp_port->sample)
1294 return -EOPNOTSUPP;
1295 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1296 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1297 return -EEXIST;
1298 }
1299 if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1300 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1301 return -EOPNOTSUPP;
1302 }
1303
1304 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1305 tcf_sample_psample_group(a));
1306 mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1307 mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1308 mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1309
1310 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1311 if (err)
1312 goto err_port_sample_set;
1313 return 0;
1314
1315err_port_sample_set:
1316 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1317 return err;
1318}
1319
1320static void
1321mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1322{
1323 if (!mlxsw_sp_port->sample)
1324 return;
1325
1326 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1327 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1328}
1329
Yotam Gigi763b4b72016-07-21 12:03:17 +02001330static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1331 __be16 protocol,
1332 struct tc_cls_matchall_offload *cls,
1333 bool ingress)
1334{
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001335 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001336 const struct tc_action *a;
WANG Cong22dc13c2016-08-13 22:35:00 -07001337 LIST_HEAD(actions);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001338 int err;
1339
Ido Schimmel86cb13e2016-07-25 13:12:33 +03001340 if (!tc_single_action(cls->exts)) {
Yotam Gigi763b4b72016-07-21 12:03:17 +02001341 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
Yotam Gigie915ac62017-01-09 11:25:48 +01001342 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001343 }
1344
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001345 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1346 if (!mall_tc_entry)
1347 return -ENOMEM;
1348 mall_tc_entry->cookie = cls->cookie;
Ido Schimmel86cb13e2016-07-25 13:12:33 +03001349
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001350 tcf_exts_to_list(cls->exts, &actions);
1351 a = list_first_entry(&actions, struct tc_action, list);
1352
1353 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1354 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1355
1356 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1357 mirror = &mall_tc_entry->mirror;
1358 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1359 mirror, a, ingress);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001360 } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1361 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1362 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
1363 a, ingress);
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001364 } else {
1365 err = -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001366 }
1367
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001368 if (err)
1369 goto err_add_action;
1370
1371 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001372 return 0;
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001373
1374err_add_action:
1375 kfree(mall_tc_entry);
1376 return err;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001377}
1378
1379static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1380 struct tc_cls_matchall_offload *cls)
1381{
Yotam Gigi763b4b72016-07-21 12:03:17 +02001382 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001383
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001384 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1385 cls->cookie);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001386 if (!mall_tc_entry) {
1387 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1388 return;
1389 }
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001390 list_del(&mall_tc_entry->list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001391
1392 switch (mall_tc_entry->type) {
1393 case MLXSW_SP_PORT_MALL_MIRROR:
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001394 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1395 &mall_tc_entry->mirror);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001396 break;
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01001397 case MLXSW_SP_PORT_MALL_SAMPLE:
1398 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1399 break;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001400 default:
1401 WARN_ON(1);
1402 }
1403
Yotam Gigi763b4b72016-07-21 12:03:17 +02001404 kfree(mall_tc_entry);
1405}
1406
1407static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
1408 __be16 proto, struct tc_to_netdev *tc)
1409{
1410 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1411 bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
1412
Jiri Pirko7aa0f5a2017-02-03 10:29:09 +01001413 switch (tc->type) {
1414 case TC_SETUP_MATCHALL:
Yotam Gigi763b4b72016-07-21 12:03:17 +02001415 switch (tc->cls_mall->command) {
1416 case TC_CLSMATCHALL_REPLACE:
1417 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
1418 proto,
1419 tc->cls_mall,
1420 ingress);
1421 case TC_CLSMATCHALL_DESTROY:
1422 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
1423 tc->cls_mall);
1424 return 0;
1425 default:
1426 return -EINVAL;
1427 }
Jiri Pirko7aa0f5a2017-02-03 10:29:09 +01001428 case TC_SETUP_CLSFLOWER:
1429 switch (tc->cls_flower->command) {
1430 case TC_CLSFLOWER_REPLACE:
1431 return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
1432 proto, tc->cls_flower);
1433 case TC_CLSFLOWER_DESTROY:
1434 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
1435 tc->cls_flower);
1436 return 0;
1437 default:
1438 return -EOPNOTSUPP;
1439 }
Yotam Gigi763b4b72016-07-21 12:03:17 +02001440 }
1441
Yotam Gigie915ac62017-01-09 11:25:48 +01001442 return -EOPNOTSUPP;
Yotam Gigi763b4b72016-07-21 12:03:17 +02001443}
1444
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001445static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1446 .ndo_open = mlxsw_sp_port_open,
1447 .ndo_stop = mlxsw_sp_port_stop,
1448 .ndo_start_xmit = mlxsw_sp_port_xmit,
Yotam Gigi763b4b72016-07-21 12:03:17 +02001449 .ndo_setup_tc = mlxsw_sp_setup_tc,
Jiri Pirkoc5b9b512015-12-03 12:12:22 +01001450 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001451 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1452 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1453 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001454 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1455 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001456 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1457 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
1458 .ndo_fdb_add = switchdev_port_fdb_add,
1459 .ndo_fdb_del = switchdev_port_fdb_del,
1460 .ndo_fdb_dump = switchdev_port_fdb_dump,
1461 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
1462 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
1463 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001464 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001465};
1466
1467static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1468 struct ethtool_drvinfo *drvinfo)
1469{
1470 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1471 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1472
1473 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1474 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1475 sizeof(drvinfo->version));
1476 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1477 "%d.%d.%d",
1478 mlxsw_sp->bus_info->fw_rev.major,
1479 mlxsw_sp->bus_info->fw_rev.minor,
1480 mlxsw_sp->bus_info->fw_rev.subminor);
1481 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1482 sizeof(drvinfo->bus_info));
1483}
1484
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001485static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1486 struct ethtool_pauseparam *pause)
1487{
1488 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1489
1490 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1491 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1492}
1493
1494static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1495 struct ethtool_pauseparam *pause)
1496{
1497 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1498
1499 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1500 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1501 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1502
1503 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1504 pfcc_pl);
1505}
1506
1507static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1508 struct ethtool_pauseparam *pause)
1509{
1510 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1511 bool pause_en = pause->tx_pause || pause->rx_pause;
1512 int err;
1513
Ido Schimmeld81a6bd2016-04-06 17:10:16 +02001514 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1515 netdev_err(dev, "PFC already enabled on port\n");
1516 return -EINVAL;
1517 }
1518
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001519 if (pause->autoneg) {
1520 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1521 return -EINVAL;
1522 }
1523
1524 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1525 if (err) {
1526 netdev_err(dev, "Failed to configure port's headroom\n");
1527 return err;
1528 }
1529
1530 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1531 if (err) {
1532 netdev_err(dev, "Failed to set PAUSE parameters\n");
1533 goto err_port_pause_configure;
1534 }
1535
1536 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1537 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1538
1539 return 0;
1540
1541err_port_pause_configure:
1542 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1543 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1544 return err;
1545}
1546
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001547struct mlxsw_sp_port_hw_stats {
1548 char str[ETH_GSTRING_LEN];
Jiri Pirko412791d2016-10-21 16:07:19 +02001549 u64 (*getter)(const char *payload);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001550};
1551
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001552static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001553 {
1554 .str = "a_frames_transmitted_ok",
1555 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1556 },
1557 {
1558 .str = "a_frames_received_ok",
1559 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1560 },
1561 {
1562 .str = "a_frame_check_sequence_errors",
1563 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1564 },
1565 {
1566 .str = "a_alignment_errors",
1567 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1568 },
1569 {
1570 .str = "a_octets_transmitted_ok",
1571 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1572 },
1573 {
1574 .str = "a_octets_received_ok",
1575 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1576 },
1577 {
1578 .str = "a_multicast_frames_xmitted_ok",
1579 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1580 },
1581 {
1582 .str = "a_broadcast_frames_xmitted_ok",
1583 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1584 },
1585 {
1586 .str = "a_multicast_frames_received_ok",
1587 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1588 },
1589 {
1590 .str = "a_broadcast_frames_received_ok",
1591 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1592 },
1593 {
1594 .str = "a_in_range_length_errors",
1595 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1596 },
1597 {
1598 .str = "a_out_of_range_length_field",
1599 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1600 },
1601 {
1602 .str = "a_frame_too_long_errors",
1603 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1604 },
1605 {
1606 .str = "a_symbol_error_during_carrier",
1607 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1608 },
1609 {
1610 .str = "a_mac_control_frames_transmitted",
1611 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1612 },
1613 {
1614 .str = "a_mac_control_frames_received",
1615 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1616 },
1617 {
1618 .str = "a_unsupported_opcodes_received",
1619 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1620 },
1621 {
1622 .str = "a_pause_mac_ctrl_frames_received",
1623 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1624 },
1625 {
1626 .str = "a_pause_mac_ctrl_frames_xmitted",
1627 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1628 },
1629};
1630
1631#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1632
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001633static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1634 {
1635 .str = "rx_octets_prio",
1636 .getter = mlxsw_reg_ppcnt_rx_octets_get,
1637 },
1638 {
1639 .str = "rx_frames_prio",
1640 .getter = mlxsw_reg_ppcnt_rx_frames_get,
1641 },
1642 {
1643 .str = "tx_octets_prio",
1644 .getter = mlxsw_reg_ppcnt_tx_octets_get,
1645 },
1646 {
1647 .str = "tx_frames_prio",
1648 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1649 },
1650 {
1651 .str = "rx_pause_prio",
1652 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1653 },
1654 {
1655 .str = "rx_pause_duration_prio",
1656 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1657 },
1658 {
1659 .str = "tx_pause_prio",
1660 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1661 },
1662 {
1663 .str = "tx_pause_duration_prio",
1664 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1665 },
1666};
1667
1668#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1669
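/* The TC transmit queue counter is reported by the device in buffer cells;
 * convert it to bytes so ethtool shows a meaningful unit.
 */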
Jiri Pirko412791d2016-10-21 16:07:19 +02001670static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001671{
1672 u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1673
1674 return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
1675}
1676
1677static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1678 {
1679 .str = "tc_transmit_queue_tc",
1680 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
1681 },
1682 {
1683 .str = "tc_no_buffer_discard_uc_tc",
1684 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1685 },
1686};
1687
1688#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1689
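/* Total number of ethtool counters per port: the IEEE 802.3 group plus the
 * per-priority and per-TC groups, each replicated IEEE_8021QAZ_MAX_TCS times.
 */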
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001690#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001691 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1692 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001693 IEEE_8021QAZ_MAX_TCS)
1694
1695static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1696{
1697 int i;
1698
1699 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1700 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1701 mlxsw_sp_port_hw_prio_stats[i].str, prio);
1702 *p += ETH_GSTRING_LEN;
1703 }
1704}
1705
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001706static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1707{
1708 int i;
1709
1710 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1711 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1712 mlxsw_sp_port_hw_tc_stats[i].str, tc);
1713 *p += ETH_GSTRING_LEN;
1714 }
1715}
1716
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001717static void mlxsw_sp_port_get_strings(struct net_device *dev,
1718 u32 stringset, u8 *data)
1719{
1720 u8 *p = data;
1721 int i;
1722
1723 switch (stringset) {
1724 case ETH_SS_STATS:
1725 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1726 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1727 ETH_GSTRING_LEN);
1728 p += ETH_GSTRING_LEN;
1729 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001730
1731 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1732 mlxsw_sp_port_get_prio_strings(&p, i);
1733
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001734 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1735 mlxsw_sp_port_get_tc_strings(&p, i);
1736
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001737 break;
1738 }
1739}
1740
Ido Schimmel3a66ee32015-11-27 13:45:55 +01001741static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1742 enum ethtool_phys_id_state state)
1743{
1744 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1745 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1746 char mlcr_pl[MLXSW_REG_MLCR_LEN];
1747 bool active;
1748
1749 switch (state) {
1750 case ETHTOOL_ID_ACTIVE:
1751 active = true;
1752 break;
1753 case ETHTOOL_ID_INACTIVE:
1754 active = false;
1755 break;
1756 default:
1757 return -EOPNOTSUPP;
1758 }
1759
1760 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1761 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1762}
1763
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001764static int
1765mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
1766 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
1767{
1768 switch (grp) {
1769 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
1770 *p_hw_stats = mlxsw_sp_port_hw_stats;
1771 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
1772 break;
1773 case MLXSW_REG_PPCNT_PRIO_CNT:
1774 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
1775 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1776 break;
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001777 case MLXSW_REG_PPCNT_TC_CNT:
1778 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
1779 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
1780 break;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001781 default:
1782 WARN_ON(1);
Yotam Gigie915ac62017-01-09 11:25:48 +01001783 return -EOPNOTSUPP;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001784 }
1785 return 0;
1786}
1787
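/* Fetch a single PPCNT counter group from the device and copy its values
 * into the ethtool data array, starting at data_index.
 */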
1788static void __mlxsw_sp_port_get_stats(struct net_device *dev,
1789 enum mlxsw_reg_ppcnt_grp grp, int prio,
1790 u64 *data, int data_index)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001791{
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001792 struct mlxsw_sp_port_hw_stats *hw_stats;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001793 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001794 int i, len;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001795 int err;
1796
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001797 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
1798 if (err)
1799 return;
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001800 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001801 for (i = 0; i < len; i++)
Colin Ian Kingfaac0ff2016-09-23 12:02:45 +01001802 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001803}
1804
1805static void mlxsw_sp_port_get_stats(struct net_device *dev,
1806 struct ethtool_stats *stats, u64 *data)
1807{
1808 int i, data_index = 0;
1809
1810 /* IEEE 802.3 Counters */
1811 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
1812 data, data_index);
1813 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
1814
1815 /* Per-Priority Counters */
1816 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1817 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
1818 data, data_index);
1819 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1820 }
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001821
1822 /* Per-TC Counters */
1823 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1824 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
1825 data, data_index);
1826 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
1827 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001828}
1829
1830static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1831{
1832 switch (sset) {
1833 case ETH_SS_STATS:
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001834 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001835 default:
1836 return -EOPNOTSUPP;
1837 }
1838}
1839
1840struct mlxsw_sp_port_link_mode {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001841 enum ethtool_link_mode_bit_indices mask_ethtool;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001842 u32 mask;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001843 u32 speed;
1844};
1845
1846static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1847 {
1848 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001849 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1850 .speed = SPEED_100,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001851 },
1852 {
1853 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1854 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001855 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1856 .speed = SPEED_1000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001857 },
1858 {
1859 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001860 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1861 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001862 },
1863 {
1864 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1865 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001866 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1867 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001868 },
1869 {
1870 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1871 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1872 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1873 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001874 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1875 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001876 },
1877 {
1878 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001879 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
1880 .speed = SPEED_20000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001881 },
1882 {
1883 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001884 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1885 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001886 },
1887 {
1888 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001889 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1890 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001891 },
1892 {
1893 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001894 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1895 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001896 },
1897 {
1898 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001899 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1900 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001901 },
1902 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001903 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
1904 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1905 .speed = SPEED_25000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001906 },
1907 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001908 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
1909 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1910 .speed = SPEED_25000,
1911 },
1912 {
1913 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1914 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1915 .speed = SPEED_25000,
1916 },
1917 {
1918 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1919 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1920 .speed = SPEED_25000,
1921 },
1922 {
1923 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
1924 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1925 .speed = SPEED_50000,
1926 },
1927 {
1928 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1929 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1930 .speed = SPEED_50000,
1931 },
1932 {
1933 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
1934 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1935 .speed = SPEED_50000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001936 },
1937 {
1938 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001939 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
1940 .speed = SPEED_56000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001941 },
1942 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001943 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1944 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
1945 .speed = SPEED_56000,
1946 },
1947 {
1948 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1949 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
1950 .speed = SPEED_56000,
1951 },
1952 {
1953 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1954 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
1955 .speed = SPEED_56000,
1956 },
1957 {
1958 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
1959 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1960 .speed = SPEED_100000,
1961 },
1962 {
1963 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
1964 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1965 .speed = SPEED_100000,
1966 },
1967 {
1968 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
1969 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1970 .speed = SPEED_100000,
1971 },
1972 {
1973 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
1974 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1975 .speed = SPEED_100000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001976 },
1977};
1978
1979#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1980
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001981static void
1982mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
1983 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001984{
1985 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1986 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1987 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1988 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1989 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1990 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001991 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001992
1993 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1994 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1995 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1996 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1997 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001998 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001999}
2000
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002001static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002002{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002003 int i;
2004
2005 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2006 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002007 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2008 mode);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002009 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002010}
2011
2012static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002013 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002014{
2015 u32 speed = SPEED_UNKNOWN;
2016 u8 duplex = DUPLEX_UNKNOWN;
2017 int i;
2018
2019 if (!carrier_ok)
2020 goto out;
2021
2022 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2023 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2024 speed = mlxsw_sp_port_link_mode[i].speed;
2025 duplex = DUPLEX_FULL;
2026 break;
2027 }
2028 }
2029out:
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002030 cmd->base.speed = speed;
2031 cmd->base.duplex = duplex;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002032}
2033
2034static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2035{
2036 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2037 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2038 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2039 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2040 return PORT_FIBRE;
2041
2042 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2043 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2044 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2045 return PORT_DA;
2046
2047 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2048 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2049 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2050 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2051 return PORT_NONE;
2052
2053 return PORT_OTHER;
2054}
2055
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002056static u32
2057mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002058{
2059 u32 ptys_proto = 0;
2060 int i;
2061
2062 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002063 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2064 cmd->link_modes.advertising))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002065 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2066 }
2067 return ptys_proto;
2068}
2069
2070static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2071{
2072 u32 ptys_proto = 0;
2073 int i;
2074
2075 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2076 if (speed == mlxsw_sp_port_link_mode[i].speed)
2077 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2078 }
2079 return ptys_proto;
2080}
2081
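/* Build a PTYS protocol mask containing every link mode whose speed does
 * not exceed upper_speed.
 */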
Ido Schimmel18f1e702016-02-26 17:32:31 +01002082static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2083{
2084 u32 ptys_proto = 0;
2085 int i;
2086
2087 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2088 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2089 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2090 }
2091 return ptys_proto;
2092}
2093
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002094static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2095 struct ethtool_link_ksettings *cmd)
2096{
2097 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2098 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2099 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2100
2101 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2102 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2103}
2104
2105static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2106 struct ethtool_link_ksettings *cmd)
2107{
2108 if (!autoneg)
2109 return;
2110
2111 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2112 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2113}
2114
2115static void
2116mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2117 struct ethtool_link_ksettings *cmd)
2118{
2119 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2120 return;
2121
2122 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2123 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2124}
2125
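/* Derive the ethtool link settings (supported, advertised and link-partner
 * modes, speed and duplex) from a PTYS register query.
 */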
2126static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2127 struct ethtool_link_ksettings *cmd)
2128{
2129 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2130 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2131 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2132 char ptys_pl[MLXSW_REG_PTYS_LEN];
2133 u8 autoneg_status;
2134 bool autoneg;
2135 int err;
2136
2137 autoneg = mlxsw_sp_port->link.autoneg;
Elad Raz401c8b42016-10-28 21:35:52 +02002138 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002139 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2140 if (err)
2141 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002142 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2143 &eth_proto_oper);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002144
2145 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2146
2147 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2148
2149 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2150 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2151 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2152
2153 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2154 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2155 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2156 cmd);
2157
2158 return 0;
2159}
2160
2161static int
2162mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2163 const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002164{
2165 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2166 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2167 char ptys_pl[MLXSW_REG_PTYS_LEN];
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002168 u32 eth_proto_cap, eth_proto_new;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002169 bool autoneg;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002170 int err;
2171
Elad Raz401c8b42016-10-28 21:35:52 +02002172 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002173 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002174 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002175 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002176 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002177
2178 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2179 eth_proto_new = autoneg ?
2180 mlxsw_sp_to_ptys_advert_link(cmd) :
2181 mlxsw_sp_to_ptys_speed(cmd->base.speed);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002182
2183 eth_proto_new = eth_proto_new & eth_proto_cap;
2184 if (!eth_proto_new) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002185 netdev_err(dev, "No supported speed requested\n");
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002186 return -EINVAL;
2187 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002188
Elad Raz401c8b42016-10-28 21:35:52 +02002189 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2190 eth_proto_new);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002191 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002192 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002193 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002194
Ido Schimmel6277d462016-07-15 11:14:58 +02002195 if (!netif_running(dev))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002196 return 0;
2197
Ido Schimmel0c83f882016-09-12 13:26:23 +02002198 mlxsw_sp_port->link.autoneg = autoneg;
2199
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002200 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2201 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002202
2203 return 0;
2204}
2205
2206static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2207 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2208 .get_link = ethtool_op_get_link,
Ido Schimmel9f7ec052016-04-06 17:10:14 +02002209 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2210 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002211 .get_strings = mlxsw_sp_port_get_strings,
Ido Schimmel3a66ee32015-11-27 13:45:55 +01002212 .set_phys_id = mlxsw_sp_port_set_phys_id,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002213 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2214 .get_sset_count = mlxsw_sp_port_get_sset_count,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002215 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2216 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002217};
2218
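/* Advertise every speed the port can reach with the given lane width; the
 * upper bound is the per-lane base speed multiplied by the width.
 */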
Ido Schimmel18f1e702016-02-26 17:32:31 +01002219static int
2220mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2221{
2222 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2223 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2224 char ptys_pl[MLXSW_REG_PTYS_LEN];
2225 u32 eth_proto_admin;
2226
2227 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
Elad Raz401c8b42016-10-28 21:35:52 +02002228 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2229 eth_proto_admin);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002230 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2231}
2232
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002233int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2234 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2235 bool dwrr, u8 dwrr_weight)
Ido Schimmel90183b92016-04-06 17:10:08 +02002236{
2237 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2238 char qeec_pl[MLXSW_REG_QEEC_LEN];
2239
2240 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2241 next_index);
2242 mlxsw_reg_qeec_de_set(qeec_pl, true);
2243 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2244 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2245 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2246}
2247
Ido Schimmelcc7cf512016-04-06 17:10:11 +02002248int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2249 enum mlxsw_reg_qeec_hr hr, u8 index,
2250 u8 next_index, u32 maxrate)
Ido Schimmel90183b92016-04-06 17:10:08 +02002251{
2252 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2253 char qeec_pl[MLXSW_REG_QEEC_LEN];
2254
2255 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2256 next_index);
2257 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2258 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2259 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2260}
2261
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002262int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2263 u8 switch_prio, u8 tclass)
Ido Schimmel90183b92016-04-06 17:10:08 +02002264{
2265 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2266 char qtct_pl[MLXSW_REG_QTCT_LEN];
2267
2268 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2269 tclass);
2270 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2271}
2272
2273static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2274{
2275 int err, i;
2276
2277	/* Set up the element hierarchy, so that each TC is linked to
2278	 * one subgroup, and all the subgroups are members of the same group.
2279 */
2280 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2281 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2282 0);
2283 if (err)
2284 return err;
2285 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2286 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2287 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2288 0, false, 0);
2289 if (err)
2290 return err;
2291 }
2292 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2293 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2294 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2295 false, 0);
2296 if (err)
2297 return err;
2298 }
2299
2300	/* Make sure the max shaper is disabled in all hierarchies that
2301 * support it.
2302 */
2303 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2304 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2305 MLXSW_REG_QEEC_MAS_DIS);
2306 if (err)
2307 return err;
2308 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2309 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2310 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2311 i, 0,
2312 MLXSW_REG_QEEC_MAS_DIS);
2313 if (err)
2314 return err;
2315 }
2316 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2317 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2318 MLXSW_REG_QEEC_HIERARCY_TC,
2319 i, i,
2320 MLXSW_REG_QEEC_MAS_DIS);
2321 if (err)
2322 return err;
2323 }
2324
2325 /* Map all priorities to traffic class 0. */
2326 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2327 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
2328 if (err)
2329 return err;
2330 }
2331
2332 return 0;
2333}
2334
Ido Schimmel05978482016-08-17 16:39:30 +02002335static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
2336{
2337 mlxsw_sp_port->pvid = 1;
2338
2339 return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
2340}
2341
2342static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
2343{
2344 return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
2345}
2346
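/* Allocate the port netdev, set up its hardware resources (SWID, MAC, MTU,
 * buffers, ETS, DCB, PVID vPort) and register it with the stack.
 */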
Jiri Pirko67963a32016-10-28 21:35:55 +02002347static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2348 bool split, u8 module, u8 width, u8 lane)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002349{
2350 struct mlxsw_sp_port *mlxsw_sp_port;
2351 struct net_device *dev;
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002352 size_t bytes;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002353 int err;
2354
2355 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2356 if (!dev)
2357 return -ENOMEM;
Jiri Pirkof20a91f2016-10-27 15:13:00 +02002358 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002359 mlxsw_sp_port = netdev_priv(dev);
2360 mlxsw_sp_port->dev = dev;
2361 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2362 mlxsw_sp_port->local_port = local_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002363 mlxsw_sp_port->split = split;
Ido Schimmeld664b412016-06-09 09:51:40 +02002364 mlxsw_sp_port->mapping.module = module;
2365 mlxsw_sp_port->mapping.width = width;
2366 mlxsw_sp_port->mapping.lane = lane;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002367 mlxsw_sp_port->link.autoneg = 1;
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002368 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
2369 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
2370 if (!mlxsw_sp_port->active_vlans) {
2371 err = -ENOMEM;
2372 goto err_port_active_vlans_alloc;
2373 }
Elad Razfc1273a2016-01-06 13:01:11 +01002374 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
2375 if (!mlxsw_sp_port->untagged_vlans) {
2376 err = -ENOMEM;
2377 goto err_port_untagged_vlans_alloc;
2378 }
Ido Schimmel7f71eb42015-12-15 16:03:37 +01002379 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02002380 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002381
2382 mlxsw_sp_port->pcpu_stats =
2383 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2384 if (!mlxsw_sp_port->pcpu_stats) {
2385 err = -ENOMEM;
2386 goto err_alloc_stats;
2387 }
2388
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002389 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2390 GFP_KERNEL);
2391 if (!mlxsw_sp_port->sample) {
2392 err = -ENOMEM;
2393 goto err_alloc_sample;
2394 }
2395
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002396 mlxsw_sp_port->hw_stats.cache =
2397 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2398
2399 if (!mlxsw_sp_port->hw_stats.cache) {
2400 err = -ENOMEM;
2401 goto err_alloc_hw_stats;
2402 }
2403 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2404 &update_stats_cache);
2405
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002406 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2407 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2408
Ido Schimmel3247ff22016-09-08 08:16:02 +02002409 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2410 if (err) {
2411 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2412 mlxsw_sp_port->local_port);
2413 goto err_port_swid_set;
2414 }
2415
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002416 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2417 if (err) {
2418 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2419 mlxsw_sp_port->local_port);
2420 goto err_dev_addr_init;
2421 }
2422
2423 netif_carrier_off(dev);
2424
2425 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
Yotam Gigi763b4b72016-07-21 12:03:17 +02002426 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2427 dev->hw_features |= NETIF_F_HW_TC;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002428
Jarod Wilsond894be52016-10-20 13:55:16 -04002429 dev->min_mtu = 0;
2430 dev->max_mtu = ETH_MAX_MTU;
2431
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002432	/* Each packet needs to have a Tx header (metadata) on top of all
2433	 * other headers.
2434 */
Yotam Gigifeb7d382016-10-04 09:46:04 +02002435 dev->needed_headroom = MLXSW_TXHDR_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002436
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002437 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2438 if (err) {
2439 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2440 mlxsw_sp_port->local_port);
2441 goto err_port_system_port_mapping_set;
2442 }
2443
Ido Schimmel18f1e702016-02-26 17:32:31 +01002444 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2445 if (err) {
2446 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2447 mlxsw_sp_port->local_port);
2448 goto err_port_speed_by_width_set;
2449 }
2450
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002451 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2452 if (err) {
2453 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2454 mlxsw_sp_port->local_port);
2455 goto err_port_mtu_set;
2456 }
2457
2458 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2459 if (err)
2460 goto err_port_admin_status_set;
2461
2462 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2463 if (err) {
2464 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2465 mlxsw_sp_port->local_port);
2466 goto err_port_buffers_init;
2467 }
2468
Ido Schimmel90183b92016-04-06 17:10:08 +02002469 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2470 if (err) {
2471 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2472 mlxsw_sp_port->local_port);
2473 goto err_port_ets_init;
2474 }
2475
Ido Schimmelf00817d2016-04-06 17:10:09 +02002476 /* ETS and buffers must be initialized before DCB. */
2477 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2478 if (err) {
2479 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2480 mlxsw_sp_port->local_port);
2481 goto err_port_dcb_init;
2482 }
2483
Ido Schimmel05978482016-08-17 16:39:30 +02002484 err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
2485 if (err) {
2486 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
2487 mlxsw_sp_port->local_port);
2488 goto err_port_pvid_vport_create;
2489 }
2490
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002491 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
Ido Schimmel2f258442016-08-17 16:39:31 +02002492 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002493 err = register_netdev(dev);
2494 if (err) {
2495 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2496 mlxsw_sp_port->local_port);
2497 goto err_register_netdev;
2498 }
2499
Elad Razd808c7e2016-10-28 21:35:57 +02002500 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2501 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2502 module);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002503 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002504 return 0;
2505
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002506err_register_netdev:
Ido Schimmel2f258442016-08-17 16:39:31 +02002507 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02002508 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmel05978482016-08-17 16:39:30 +02002509 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2510err_port_pvid_vport_create:
Ido Schimmel4de34eb2016-08-04 17:36:22 +03002511 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02002512err_port_dcb_init:
Ido Schimmel90183b92016-04-06 17:10:08 +02002513err_port_ets_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002514err_port_buffers_init:
2515err_port_admin_status_set:
2516err_port_mtu_set:
Ido Schimmel18f1e702016-02-26 17:32:31 +01002517err_port_speed_by_width_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002518err_port_system_port_mapping_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002519err_dev_addr_init:
Ido Schimmel3247ff22016-09-08 08:16:02 +02002520 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2521err_port_swid_set:
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002522 kfree(mlxsw_sp_port->hw_stats.cache);
2523err_alloc_hw_stats:
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002524 kfree(mlxsw_sp_port->sample);
2525err_alloc_sample:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002526 free_percpu(mlxsw_sp_port->pcpu_stats);
2527err_alloc_stats:
Elad Razfc1273a2016-01-06 13:01:11 +01002528 kfree(mlxsw_sp_port->untagged_vlans);
2529err_port_untagged_vlans_alloc:
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002530 kfree(mlxsw_sp_port->active_vlans);
2531err_port_active_vlans_alloc:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002532 free_netdev(dev);
2533 return err;
2534}
2535
Jiri Pirko67963a32016-10-28 21:35:55 +02002536static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2537 bool split, u8 module, u8 width, u8 lane)
2538{
2539 int err;
2540
2541 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2542 if (err) {
2543 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2544 local_port);
2545 return err;
2546 }
Ido Schimmel9a60c902016-12-16 19:29:03 +01002547 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
Jiri Pirko67963a32016-10-28 21:35:55 +02002548 module, width, lane);
2549 if (err)
2550 goto err_port_create;
2551 return 0;
2552
2553err_port_create:
2554 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2555 return err;
2556}
2557
2558static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002559{
2560 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2561
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002562 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
Jiri Pirko67963a32016-10-28 21:35:55 +02002563 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002564 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
Ido Schimmel2f258442016-08-17 16:39:31 +02002565 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02002566 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmel05978482016-08-17 16:39:30 +02002567 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02002568 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmel3e9b27b2016-02-26 17:32:28 +01002569 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2570 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002571 kfree(mlxsw_sp_port->hw_stats.cache);
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002572 kfree(mlxsw_sp_port->sample);
Yotam Gigi136f1442017-01-09 11:25:47 +01002573 free_percpu(mlxsw_sp_port->pcpu_stats);
Elad Razfc1273a2016-01-06 13:01:11 +01002574 kfree(mlxsw_sp_port->untagged_vlans);
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002575 kfree(mlxsw_sp_port->active_vlans);
Ido Schimmel32d863f2016-07-02 11:00:10 +02002576 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002577 free_netdev(mlxsw_sp_port->dev);
2578}
2579
Jiri Pirko67963a32016-10-28 21:35:55 +02002580static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2581{
2582 __mlxsw_sp_port_remove(mlxsw_sp, local_port);
2583 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2584}
2585
Jiri Pirkof83e2102016-10-28 21:35:49 +02002586static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2587{
2588 return mlxsw_sp->ports[local_port] != NULL;
2589}
2590
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002591static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2592{
2593 int i;
2594
2595 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002596 if (mlxsw_sp_port_created(mlxsw_sp, i))
2597 mlxsw_sp_port_remove(mlxsw_sp, i);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002598 kfree(mlxsw_sp->ports);
2599}
2600
2601static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2602{
Ido Schimmeld664b412016-06-09 09:51:40 +02002603 u8 module, width, lane;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002604 size_t alloc_size;
2605 int i;
2606 int err;
2607
2608 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
2609 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2610 if (!mlxsw_sp->ports)
2611 return -ENOMEM;
2612
2613 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
Ido Schimmel558c2d52016-02-26 17:32:29 +01002614 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
Ido Schimmeld664b412016-06-09 09:51:40 +02002615 &width, &lane);
Ido Schimmel558c2d52016-02-26 17:32:29 +01002616 if (err)
2617 goto err_port_module_info_get;
2618 if (!width)
2619 continue;
2620 mlxsw_sp->port_to_module[i] = module;
Jiri Pirko67963a32016-10-28 21:35:55 +02002621 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
2622 module, width, lane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002623 if (err)
2624 goto err_port_create;
2625 }
2626 return 0;
2627
2628err_port_create:
Ido Schimmel558c2d52016-02-26 17:32:29 +01002629err_port_module_info_get:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002630 for (i--; i >= 1; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002631 if (mlxsw_sp_port_created(mlxsw_sp, i))
2632 mlxsw_sp_port_remove(mlxsw_sp, i);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002633 kfree(mlxsw_sp->ports);
2634 return err;
2635}
2636
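/* Return the first local port of the cluster that local_port belongs to. */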
Ido Schimmel18f1e702016-02-26 17:32:31 +01002637static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2638{
2639 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2640
2641 return local_port - offset;
2642}
2643
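/* Create the split ports: map each one to its share of the module lanes,
 * move it to the default switch partition and register its netdev.
 */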
Ido Schimmelbe945352016-06-09 09:51:39 +02002644static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2645 u8 module, unsigned int count)
2646{
2647 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
2648 int err, i;
2649
2650 for (i = 0; i < count; i++) {
2651 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
2652 width, i * width);
2653 if (err)
2654 goto err_port_module_map;
2655 }
2656
2657 for (i = 0; i < count; i++) {
2658 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
2659 if (err)
2660 goto err_port_swid_set;
2661 }
2662
2663 for (i = 0; i < count; i++) {
2664 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
Ido Schimmeld664b412016-06-09 09:51:40 +02002665 module, width, i * width);
Ido Schimmelbe945352016-06-09 09:51:39 +02002666 if (err)
2667 goto err_port_create;
2668 }
2669
2670 return 0;
2671
2672err_port_create:
2673 for (i--; i >= 0; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002674 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2675 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmelbe945352016-06-09 09:51:39 +02002676 i = count;
2677err_port_swid_set:
2678 for (i--; i >= 0; i--)
2679 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
2680 MLXSW_PORT_SWID_DISABLED_PORT);
2681 i = count;
2682err_port_module_map:
2683 for (i--; i >= 0; i--)
2684 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
2685 return err;
2686}
2687
2688static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2689 u8 base_port, unsigned int count)
2690{
2691 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
2692 int i;
2693
2694	/* A split by four means we need to re-create two ports; a split by
2695	 * two means we only need to re-create one.
2696 */
2697 count = count / 2;
2698
2699 for (i = 0; i < count; i++) {
2700 local_port = base_port + i * 2;
2701 module = mlxsw_sp->port_to_module[local_port];
2702
2703 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
2704 0);
2705 }
2706
2707 for (i = 0; i < count; i++)
2708 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
2709
2710 for (i = 0; i < count; i++) {
2711 local_port = base_port + i * 2;
2712 module = mlxsw_sp->port_to_module[local_port];
2713
2714 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
Ido Schimmeld664b412016-06-09 09:51:40 +02002715 width, 0);
Ido Schimmelbe945352016-06-09 09:51:39 +02002716 }
2717}
2718
Jiri Pirkob2f10572016-04-08 19:11:23 +02002719static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2720 unsigned int count)
Ido Schimmel18f1e702016-02-26 17:32:31 +01002721{
Jiri Pirkob2f10572016-04-08 19:11:23 +02002722 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002723 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002724 u8 module, cur_width, base_port;
2725 int i;
2726 int err;
2727
2728 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2729 if (!mlxsw_sp_port) {
2730 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2731 local_port);
2732 return -EINVAL;
2733 }
2734
Ido Schimmeld664b412016-06-09 09:51:40 +02002735 module = mlxsw_sp_port->mapping.module;
2736 cur_width = mlxsw_sp_port->mapping.width;
2737
Ido Schimmel18f1e702016-02-26 17:32:31 +01002738 if (count != 2 && count != 4) {
2739 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2740 return -EINVAL;
2741 }
2742
Ido Schimmel18f1e702016-02-26 17:32:31 +01002743 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2744 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2745 return -EINVAL;
2746 }
2747
2748 /* Make sure we have enough slave (even) ports for the split. */
2749 if (count == 2) {
2750 base_port = local_port;
2751 if (mlxsw_sp->ports[base_port + 1]) {
2752 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2753 return -EINVAL;
2754 }
2755 } else {
2756 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2757 if (mlxsw_sp->ports[base_port + 1] ||
2758 mlxsw_sp->ports[base_port + 3]) {
2759 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2760 return -EINVAL;
2761 }
2762 }
2763
2764 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002765 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2766 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002767
Ido Schimmelbe945352016-06-09 09:51:39 +02002768 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2769 if (err) {
2770 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2771 goto err_port_split_create;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002772 }
2773
2774 return 0;
2775
Ido Schimmelbe945352016-06-09 09:51:39 +02002776err_port_split_create:
2777 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002778 return err;
2779}
2780
Jiri Pirkob2f10572016-04-08 19:11:23 +02002781static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
Ido Schimmel18f1e702016-02-26 17:32:31 +01002782{
Jiri Pirkob2f10572016-04-08 19:11:23 +02002783 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002784 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmeld664b412016-06-09 09:51:40 +02002785 u8 cur_width, base_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002786 unsigned int count;
2787 int i;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002788
2789 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2790 if (!mlxsw_sp_port) {
2791 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2792 local_port);
2793 return -EINVAL;
2794 }
2795
2796 if (!mlxsw_sp_port->split) {
2797 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2798 return -EINVAL;
2799 }
2800
Ido Schimmeld664b412016-06-09 09:51:40 +02002801 cur_width = mlxsw_sp_port->mapping.width;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002802 count = cur_width == 1 ? 4 : 2;
2803
2804 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2805
2806 /* Determine which ports to remove. */
2807 if (count == 2 && local_port >= base_port + 2)
2808 base_port = base_port + 2;
2809
2810 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002811 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2812 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002813
Ido Schimmelbe945352016-06-09 09:51:39 +02002814 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002815
2816 return 0;
2817}
2818
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002819static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2820 char *pude_pl, void *priv)
2821{
2822 struct mlxsw_sp *mlxsw_sp = priv;
2823 struct mlxsw_sp_port *mlxsw_sp_port;
2824 enum mlxsw_reg_pude_oper_status status;
2825 u8 local_port;
2826
2827 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2828 mlxsw_sp_port = mlxsw_sp->ports[local_port];
Ido Schimmelbbf2a472016-07-02 11:00:14 +02002829 if (!mlxsw_sp_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002830 return;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002831
2832 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2833 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2834 netdev_info(mlxsw_sp_port->dev, "link up\n");
2835 netif_carrier_on(mlxsw_sp_port->dev);
2836 } else {
2837 netdev_info(mlxsw_sp_port->dev, "link down\n");
2838 netif_carrier_off(mlxsw_sp_port->dev);
2839 }
2840}
2841
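/* Default RX handler for trapped packets: account for them in the per-CPU
 * statistics and inject them into the network stack.
 */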
Nogah Frankel14eeda92016-11-25 10:33:32 +01002842static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2843 u8 local_port, void *priv)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002844{
2845 struct mlxsw_sp *mlxsw_sp = priv;
2846 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2847 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2848
2849 if (unlikely(!mlxsw_sp_port)) {
2850 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2851 local_port);
2852 return;
2853 }
2854
2855 skb->dev = mlxsw_sp_port->dev;
2856
2857 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2858 u64_stats_update_begin(&pcpu_stats->syncp);
2859 pcpu_stats->rx_packets++;
2860 pcpu_stats->rx_bytes += skb->len;
2861 u64_stats_update_end(&pcpu_stats->syncp);
2862
2863 skb->protocol = eth_type_trans(skb, skb->dev);
2864 netif_receive_skb(skb);
2865}
2866
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02002867static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2868 void *priv)
2869{
2870 skb->offload_fwd_mark = 1;
Nogah Frankel14eeda92016-11-25 10:33:32 +01002871 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02002872}
2873
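/* Handle packets trapped for sampling: hand them to the psample module,
 * truncated to the configured size when truncation is enabled.
 */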
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002874static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
2875 void *priv)
2876{
2877 struct mlxsw_sp *mlxsw_sp = priv;
2878 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2879 struct psample_group *psample_group;
2880 u32 size;
2881
2882 if (unlikely(!mlxsw_sp_port)) {
2883 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
2884 local_port);
2885 goto out;
2886 }
2887 if (unlikely(!mlxsw_sp_port->sample)) {
2888 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
2889 local_port);
2890 goto out;
2891 }
2892
2893 size = mlxsw_sp_port->sample->truncate ?
2894 mlxsw_sp_port->sample->trunc_size : skb->len;
2895
2896 rcu_read_lock();
2897 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
2898 if (!psample_group)
2899 goto out_unlock;
2900 psample_sample_packet(psample_group, skb, size,
2901 mlxsw_sp_port->dev->ifindex, 0,
2902 mlxsw_sp_port->sample->rate);
2903out_unlock:
2904 rcu_read_unlock();
2905out:
2906 consume_skb(skb);
2907}
2908
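/* Listener declaration helpers: the _MARK variants set skb->offload_fwd_mark
 * so the bridge knows the packet was already forwarded in hardware.
 */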
Nogah Frankel117b0da2016-11-25 10:33:44 +01002909#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel0fb78a42016-11-25 10:33:39 +01002910 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01002911 _is_ctrl, SP_##_trap_group, DISCARD)
Ido Schimmel93393b32016-08-25 18:42:38 +02002912
Nogah Frankel117b0da2016-11-25 10:33:44 +01002913#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel14eeda92016-11-25 10:33:32 +01002914 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01002915 _is_ctrl, SP_##_trap_group, DISCARD)
2916
2917#define MLXSW_SP_EVENTL(_func, _trap_id) \
2918 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
Nogah Frankel14eeda92016-11-25 10:33:32 +01002919
Nogah Frankel45449132016-11-25 10:33:35 +01002920static const struct mlxsw_listener mlxsw_sp_listener[] = {
2921 /* Events */
Nogah Frankel117b0da2016-11-25 10:33:44 +01002922 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
Nogah Frankelee4a60d2016-11-25 10:33:29 +01002923 /* L2 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01002924 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
2925 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
2926 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
2927 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
2928 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
2929 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
2930 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
2931 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
2932 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
2933 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
2934 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
Ido Schimmel93393b32016-08-25 18:42:38 +02002935 /* L3 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01002936 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2937 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2938 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2939 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
2940 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
2941 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
2942 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
2943 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002944 /* PKT Sample trap */
2945 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
2946 false, SP_IP2ME, DISCARD)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002947};
2948
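/* Bind a policer to each trap group that needs one in order to
 * rate-limit traffic trapped to the CPU. Most groups are policed by
 * packet rate; the IP2ME group is policed by byte rate.
 */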
Nogah Frankel9148e7c2016-11-25 10:33:47 +01002949static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2950{
2951 char qpcr_pl[MLXSW_REG_QPCR_LEN];
2952 enum mlxsw_reg_qpcr_ir_units ir_units;
2953 int max_cpu_policers;
2954 bool is_bytes;
2955 u8 burst_size;
2956 u32 rate;
2957 int i, err;
2958
2959 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2960 return -EIO;
2961
2962 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2963
2964 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2965 for (i = 0; i < max_cpu_policers; i++) {
2966 is_bytes = false;
2967 switch (i) {
2968 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
2969 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
2970 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
2971 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
2972 rate = 128;
2973 burst_size = 7;
2974 break;
2975 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
2976 rate = 16 * 1024;
2977 burst_size = 10;
2978 break;
2979 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
2980 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
2981 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
2982 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
2983 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2984 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
2985 rate = 1024;
2986 burst_size = 7;
2987 break;
2988 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
2989 is_bytes = true;
2990 rate = 4 * 1024;
2991 burst_size = 4;
2992 break;
2993 default:
2994 continue;
2995 }
2996
2997 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
2998 burst_size);
2999 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3000 if (err)
3001 return err;
3002 }
3003
3004 return 0;
3005}
3006
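/* Assign each trap group its priority, traffic class and policer.
 * The event trap group uses the default priority and TC and is not
 * policed.
 */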
Nogah Frankel579c82e2016-11-25 10:33:42 +01003007static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003008{
3009 char htgt_pl[MLXSW_REG_HTGT_LEN];
Nogah Frankel117b0da2016-11-25 10:33:44 +01003010 enum mlxsw_reg_htgt_trap_group i;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003011 int max_cpu_policers;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003012 int max_trap_groups;
3013 u8 priority, tc;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003014 u16 policer_id;
Nogah Frankel117b0da2016-11-25 10:33:44 +01003015 int err;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003016
3017 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3018 return -EIO;
3019
3020 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003021 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003022
3023 for (i = 0; i < max_trap_groups; i++) {
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003024 policer_id = i;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003025 switch (i) {
Nogah Frankel117b0da2016-11-25 10:33:44 +01003026 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3027 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3028 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3029 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3030 priority = 5;
3031 tc = 5;
3032 break;
3033 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3034 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3035 priority = 4;
3036 tc = 4;
3037 break;
3038 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3039 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3040 priority = 3;
3041 tc = 3;
3042 break;
3043 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3044 priority = 2;
3045 tc = 2;
3046 break;
3047 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3048 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3049 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3050 priority = 1;
3051 tc = 1;
3052 break;
3053 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
Nogah Frankel579c82e2016-11-25 10:33:42 +01003054 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3055 tc = MLXSW_REG_HTGT_DEFAULT_TC;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003056 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003057 break;
3058 default:
3059 continue;
3060 }
Nogah Frankel117b0da2016-11-25 10:33:44 +01003061
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003062 if (max_cpu_policers <= policer_id &&
3063 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3064 return -EIO;
3065
3066 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003067 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3068 if (err)
3069 return err;
3070 }
3071
3072 return 0;
3073}
3074
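/* Configure CPU policers and trap groups, then register every
 * listener in mlxsw_sp_listener[]. On failure, already registered
 * listeners are unregistered in reverse order.
 */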
3075static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3076{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003077 int i;
3078 int err;
3079
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003080 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3081 if (err)
3082 return err;
3083
Nogah Frankel579c82e2016-11-25 10:33:42 +01003084 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003085 if (err)
3086 return err;
3087
Nogah Frankel45449132016-11-25 10:33:35 +01003088 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003089 err = mlxsw_core_trap_register(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003090 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003091 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003092 if (err)
Nogah Frankel45449132016-11-25 10:33:35 +01003093 goto err_listener_register;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003094
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003095 }
3096 return 0;
3097
Nogah Frankel45449132016-11-25 10:33:35 +01003098err_listener_register:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003099 for (i--; i >= 0; i--) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003100 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003101 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003102 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003103 }
3104 return err;
3105}
3106
3107static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3108{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003109 int i;
3110
Nogah Frankel45449132016-11-25 10:33:35 +01003111 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003112 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003113 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003114 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003115 }
3116}
3117
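/* Bind a flood generation type of the given bridge type to its flood
 * table: unknown unicast, unregistered multicast and broadcast (the
 * default) each use a separate table. VLAN-unaware (vFID) bridges use
 * the FID table type, VLAN-aware bridges the FID-offset table type.
 */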
3118static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
3119 enum mlxsw_reg_sfgc_type type,
3120 enum mlxsw_reg_sfgc_bridge_type bridge_type)
3121{
3122 enum mlxsw_flood_table_type table_type;
3123 enum mlxsw_sp_flood_table flood_table;
3124 char sfgc_pl[MLXSW_REG_SFGC_LEN];
3125
Ido Schimmel19ae6122015-12-15 16:03:39 +01003126 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003127 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
Ido Schimmel19ae6122015-12-15 16:03:39 +01003128 else
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003129 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
Ido Schimmel19ae6122015-12-15 16:03:39 +01003130
Nogah Frankel71c365b2017-02-09 14:54:46 +01003131 switch (type) {
3132 case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
Ido Schimmel19ae6122015-12-15 16:03:39 +01003133 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
Nogah Frankel71c365b2017-02-09 14:54:46 +01003134 break;
3135 case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
Nogah Frankel71c365b2017-02-09 14:54:46 +01003136 flood_table = MLXSW_SP_FLOOD_TABLE_MC;
3137 break;
3138 default:
3139 flood_table = MLXSW_SP_FLOOD_TABLE_BC;
3140 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003141
3142 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
3143 flood_table);
3144 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
3145}
3146
3147static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
3148{
3149 int type, err;
3150
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003151 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
3152 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
3153 continue;
3154
3155 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3156 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
3157 if (err)
3158 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003159
3160 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3161 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
3162 if (err)
3163 return err;
3164 }
3165
3166 return 0;
3167}
3168
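/* Set the LAG hash fields and allocate the array used to track the
 * upper device of each LAG, sized by the maximum number of LAGs
 * reported by the device.
 */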
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003169static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3170{
3171 char slcr_pl[MLXSW_REG_SLCR_LEN];
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003172 int err;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003173
3174 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3175 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3176 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3177 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3178 MLXSW_REG_SLCR_LAG_HASH_SIP |
3179 MLXSW_REG_SLCR_LAG_HASH_DIP |
3180 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3181 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3182 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003183 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3184 if (err)
3185 return err;
3186
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003187 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3188 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003189 return -EIO;
3190
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003191 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003192 sizeof(struct mlxsw_sp_upper),
3193 GFP_KERNEL);
3194 if (!mlxsw_sp->lags)
3195 return -ENOMEM;
3196
3197 return 0;
3198}
3199
3200static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3201{
3202 kfree(mlxsw_sp->lags);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003203}
3204
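/* Minimal trap group configuration used by the core before the full
 * set above is programmed: the EMAD trap group gets the default
 * priority and TC and no policer.
 */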
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003205static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3206{
3207 char htgt_pl[MLXSW_REG_HTGT_LEN];
3208
Nogah Frankel579c82e2016-11-25 10:33:42 +01003209 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3210 MLXSW_REG_HTGT_INVALID_POLICER,
3211 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3212 MLXSW_REG_HTGT_DEFAULT_TC);
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003213 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3214}
3215
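/* Main init flow of a Spectrum instance: base MAC, traps, flood
 * tables, shared buffers, LAG, switchdev, router, SPAN, ACL and the
 * counter pool are brought up before the ports are created.
 */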
Jiri Pirkob2f10572016-04-08 19:11:23 +02003216static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003217 const struct mlxsw_bus_info *mlxsw_bus_info)
3218{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003219 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003220 int err;
3221
3222 mlxsw_sp->core = mlxsw_core;
3223 mlxsw_sp->bus_info = mlxsw_bus_info;
Ido Schimmel14d39462016-06-20 23:04:15 +02003224 INIT_LIST_HEAD(&mlxsw_sp->fids);
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02003225 INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
Elad Raz3a49b4f2016-01-10 21:06:28 +01003226 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003227
3228 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3229 if (err) {
3230 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3231 return err;
3232 }
3233
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003234 err = mlxsw_sp_traps_init(mlxsw_sp);
3235 if (err) {
Nogah Frankel45449132016-11-25 10:33:35 +01003236 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3237 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003238 }
3239
3240 err = mlxsw_sp_flood_init(mlxsw_sp);
3241 if (err) {
3242 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
3243 goto err_flood_init;
3244 }
3245
3246 err = mlxsw_sp_buffers_init(mlxsw_sp);
3247 if (err) {
3248 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3249 goto err_buffers_init;
3250 }
3251
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003252 err = mlxsw_sp_lag_init(mlxsw_sp);
3253 if (err) {
3254 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3255 goto err_lag_init;
3256 }
3257
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003258 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3259 if (err) {
3260 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3261 goto err_switchdev_init;
3262 }
3263
Ido Schimmel464dce12016-07-02 11:00:15 +02003264 err = mlxsw_sp_router_init(mlxsw_sp);
3265 if (err) {
3266 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3267 goto err_router_init;
3268 }
3269
Yotam Gigi763b4b72016-07-21 12:03:17 +02003270 err = mlxsw_sp_span_init(mlxsw_sp);
3271 if (err) {
3272 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3273 goto err_span_init;
3274 }
3275
Jiri Pirko22a67762017-02-03 10:29:07 +01003276 err = mlxsw_sp_acl_init(mlxsw_sp);
3277 if (err) {
3278 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3279 goto err_acl_init;
3280 }
3281
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003282 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3283 if (err) {
3284 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3285 goto err_counter_pool_init;
3286 }
3287
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003288 err = mlxsw_sp_ports_create(mlxsw_sp);
3289 if (err) {
3290 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3291 goto err_ports_create;
3292 }
3293
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003294 return 0;
3295
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003296err_ports_create:
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003297 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3298err_counter_pool_init:
Jiri Pirko22a67762017-02-03 10:29:07 +01003299 mlxsw_sp_acl_fini(mlxsw_sp);
3300err_acl_init:
Yotam Gigi763b4b72016-07-21 12:03:17 +02003301 mlxsw_sp_span_fini(mlxsw_sp);
3302err_span_init:
Ido Schimmel464dce12016-07-02 11:00:15 +02003303 mlxsw_sp_router_fini(mlxsw_sp);
3304err_router_init:
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003305 mlxsw_sp_switchdev_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003306err_switchdev_init:
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003307 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003308err_lag_init:
Jiri Pirko0f433fa2016-04-14 18:19:24 +02003309 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003310err_buffers_init:
3311err_flood_init:
3312 mlxsw_sp_traps_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003313 return err;
3314}
3315
Jiri Pirkob2f10572016-04-08 19:11:23 +02003316static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003317{
Jiri Pirkob2f10572016-04-08 19:11:23 +02003318 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003319
Ido Schimmelbbf2a472016-07-02 11:00:14 +02003320 mlxsw_sp_ports_remove(mlxsw_sp);
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +01003321 mlxsw_sp_counter_pool_fini(mlxsw_sp);
Jiri Pirko22a67762017-02-03 10:29:07 +01003322 mlxsw_sp_acl_fini(mlxsw_sp);
Yotam Gigi763b4b72016-07-21 12:03:17 +02003323 mlxsw_sp_span_fini(mlxsw_sp);
Ido Schimmel464dce12016-07-02 11:00:15 +02003324 mlxsw_sp_router_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003325 mlxsw_sp_switchdev_fini(mlxsw_sp);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003326 mlxsw_sp_lag_fini(mlxsw_sp);
Jiri Pirko5113bfd2016-05-06 22:20:59 +02003327 mlxsw_sp_buffers_fini(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003328 mlxsw_sp_traps_fini(mlxsw_sp);
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02003329 WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
Ido Schimmel14d39462016-06-20 23:04:15 +02003330 WARN_ON(!list_empty(&mlxsw_sp->fids));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003331}
3332
3333static struct mlxsw_config_profile mlxsw_sp_config_profile = {
3334 .used_max_vepa_channels = 1,
3335 .max_vepa_channels = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003336 .used_max_mid = 1,
Elad Raz53ae6282016-01-10 21:06:26 +01003337 .max_mid = MLXSW_SP_MID_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003338 .used_max_pgt = 1,
3339 .max_pgt = 0,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003340 .used_flood_tables = 1,
3341 .used_flood_mode = 1,
3342 .flood_mode = 3,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003343 .max_fid_offset_flood_tables = 3,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003344 .fid_offset_flood_table_size = VLAN_N_VID - 1,
Nogah Frankel71c365b2017-02-09 14:54:46 +01003345 .max_fid_flood_tables = 3,
Ido Schimmel19ae6122015-12-15 16:03:39 +01003346 .fid_flood_table_size = MLXSW_SP_VFID_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003347 .used_max_ib_mc = 1,
3348 .max_ib_mc = 0,
3349 .used_max_pkey = 1,
3350 .max_pkey = 0,
Nogah Frankel403547d2016-09-20 11:16:52 +02003351 .used_kvd_split_data = 1,
3352 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3353 .kvd_hash_single_parts = 2,
3354 .kvd_hash_double_parts = 1,
Jiri Pirkoc6022422016-07-05 11:27:46 +02003355 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003356 .swid_config = {
3357 {
3358 .used_type = 1,
3359 .type = MLXSW_PORT_SWID_TYPE_ETH,
3360 }
3361 },
Nogah Frankel57d316b2016-07-21 12:03:09 +02003362 .resource_query_enable = 1,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003363};
3364
3365static struct mlxsw_driver mlxsw_sp_driver = {
Jiri Pirko1d20d232016-10-27 15:12:59 +02003366 .kind = mlxsw_sp_driver_name,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003367 .priv_size = sizeof(struct mlxsw_sp),
3368 .init = mlxsw_sp_init,
3369 .fini = mlxsw_sp_fini,
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003370 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
Jiri Pirko2d0ed392016-04-14 18:19:30 +02003371 .port_split = mlxsw_sp_port_split,
3372 .port_unsplit = mlxsw_sp_port_unsplit,
3373 .sb_pool_get = mlxsw_sp_sb_pool_get,
3374 .sb_pool_set = mlxsw_sp_sb_pool_set,
3375 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3376 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3377 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3378 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3379 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3380 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3381 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3382 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3383 .txhdr_construct = mlxsw_sp_txhdr_construct,
3384 .txhdr_len = MLXSW_TXHDR_LEN,
3385 .profile = &mlxsw_sp_config_profile,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003386};
3387
Jiri Pirko22a67762017-02-03 10:29:07 +01003388bool mlxsw_sp_port_dev_check(const struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003389{
3390 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3391}
3392
Jiri Pirko1182e532017-03-06 21:25:20 +01003393static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
David Aherndd823642016-10-17 19:15:49 -07003394{
Jiri Pirko1182e532017-03-06 21:25:20 +01003395 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
David Aherndd823642016-10-17 19:15:49 -07003396 int ret = 0;
3397
3398 if (mlxsw_sp_port_dev_check(lower_dev)) {
Jiri Pirko1182e532017-03-06 21:25:20 +01003399 *p_mlxsw_sp_port = netdev_priv(lower_dev);
David Aherndd823642016-10-17 19:15:49 -07003400 ret = 1;
3401 }
3402
3403 return ret;
3404}
3405
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003406static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3407{
Jiri Pirko1182e532017-03-06 21:25:20 +01003408 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003409
3410 if (mlxsw_sp_port_dev_check(dev))
3411 return netdev_priv(dev);
3412
Jiri Pirko1182e532017-03-06 21:25:20 +01003413 mlxsw_sp_port = NULL;
3414 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003415
Jiri Pirko1182e532017-03-06 21:25:20 +01003416 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003417}
3418
Ido Schimmel4724ba562017-03-10 08:53:39 +01003419struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003420{
3421 struct mlxsw_sp_port *mlxsw_sp_port;
3422
3423 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3424 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3425}
3426
3427static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3428{
Jiri Pirko1182e532017-03-06 21:25:20 +01003429 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003430
3431 if (mlxsw_sp_port_dev_check(dev))
3432 return netdev_priv(dev);
3433
Jiri Pirko1182e532017-03-06 21:25:20 +01003434 mlxsw_sp_port = NULL;
3435 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3436 &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003437
Jiri Pirko1182e532017-03-06 21:25:20 +01003438 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003439}
3440
3441struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3442{
3443 struct mlxsw_sp_port *mlxsw_sp_port;
3444
3445 rcu_read_lock();
3446 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3447 if (mlxsw_sp_port)
3448 dev_hold(mlxsw_sp_port->dev);
3449 rcu_read_unlock();
3450 return mlxsw_sp_port;
3451}
3452
3453void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3454{
3455 dev_put(mlxsw_sp_port->dev);
3456}
3457
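/* FDB entries pointing at a LAG are only flushed for a FID once the
 * last LAG member port leaves that FID; a non-LAG port always flushes
 * its own entries.
 */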
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003458static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3459 u16 fid)
3460{
3461 if (mlxsw_sp_fid_is_vfid(fid))
3462 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3463 else
3464 return test_bit(fid, lag_port->active_vlans);
3465}
3466
3467static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3468 u16 fid)
Ido Schimmel039c49a2016-01-27 15:20:18 +01003469{
3470 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003471 u8 local_port = mlxsw_sp_port->local_port;
3472 u16 lag_id = mlxsw_sp_port->lag_id;
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003473 u64 max_lag_members;
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003474 int i, count = 0;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003475
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003476 if (!mlxsw_sp_port->lagged)
3477 return true;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003478
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003479 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3480 MAX_LAG_MEMBERS);
3481 for (i = 0; i < max_lag_members; i++) {
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003482 struct mlxsw_sp_port *lag_port;
3483
3484 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3485 if (!lag_port || lag_port->local_port == local_port)
3486 continue;
3487 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
3488 count++;
3489 }
3490
3491 return !count;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003492}
3493
3494static int
3495mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3496 u16 fid)
3497{
3498 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3499 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3500
3501 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
3502 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3503 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
3504 mlxsw_sp_port->local_port);
3505
Ido Schimmel22305372016-06-20 23:04:21 +02003506 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
3507 mlxsw_sp_port->local_port, fid);
3508
Ido Schimmel039c49a2016-01-27 15:20:18 +01003509 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3510}
3511
3512static int
Ido Schimmel039c49a2016-01-27 15:20:18 +01003513mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3514 u16 fid)
3515{
3516 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3517 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3518
3519 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
3520 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3521 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
3522
Ido Schimmel22305372016-06-20 23:04:21 +02003523 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
3524 mlxsw_sp_port->lag_id, fid);
3525
Ido Schimmel039c49a2016-01-27 15:20:18 +01003526 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3527}
3528
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003529int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
Ido Schimmel039c49a2016-01-27 15:20:18 +01003530{
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003531 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3532 return 0;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003533
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003534 if (mlxsw_sp_port->lagged)
3535 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
Ido Schimmel039c49a2016-01-27 15:20:18 +01003536 fid);
3537 else
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003538 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
Ido Schimmel039c49a2016-01-27 15:20:18 +01003539}
3540
Ido Schimmel701b1862016-07-04 08:23:16 +02003541static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
3542{
3543 struct mlxsw_sp_fid *f, *tmp;
3544
3545 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
3546 if (--f->ref_count == 0)
3547 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3548 else
3549 WARN_ON_ONCE(1);
3550}
3551
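/* Only a single bridge device can be offloaded. The reference count
 * tracks how many ports are enslaved to it; once the last one leaves,
 * the FID references taken on its behalf are dropped.
 */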
Ido Schimmel7117a572016-06-20 23:04:06 +02003552static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3553 struct net_device *br_dev)
3554{
3555 return !mlxsw_sp->master_bridge.dev ||
3556 mlxsw_sp->master_bridge.dev == br_dev;
3557}
3558
3559static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
3560 struct net_device *br_dev)
3561{
3562 mlxsw_sp->master_bridge.dev = br_dev;
3563 mlxsw_sp->master_bridge.ref_count++;
3564}
3565
3566static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
3567{
Ido Schimmel701b1862016-07-04 08:23:16 +02003568 if (--mlxsw_sp->master_bridge.ref_count == 0) {
Ido Schimmel7117a572016-06-20 23:04:06 +02003569 mlxsw_sp->master_bridge.dev = NULL;
Ido Schimmel701b1862016-07-04 08:23:16 +02003570 /* It's possible upper VLAN devices are still holding
3571 * references to underlying FIDs. Drop the reference
3572 * and release the resources if it was the last one.
3573 * If it wasn't, then something bad happened.
3574 */
3575 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
3576 }
Ido Schimmel7117a572016-06-20 23:04:06 +02003577}
3578
3579static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
3580 struct net_device *br_dev)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003581{
3582 struct net_device *dev = mlxsw_sp_port->dev;
3583 int err;
3584
3585	/* When a port is not bridged, untagged packets are tagged with
3586 * PVID=VID=1, thereby creating an implicit VLAN interface in
3587 * the device. Remove it and let bridge code take care of its
3588 * own VLANs.
3589 */
3590 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01003591 if (err)
3592 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003593
Ido Schimmel7117a572016-06-20 23:04:06 +02003594 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
3595
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01003596 mlxsw_sp_port->learning = 1;
3597 mlxsw_sp_port->learning_sync = 1;
3598 mlxsw_sp_port->uc_flood = 1;
Nogah Frankel71c365b2017-02-09 14:54:46 +01003599 mlxsw_sp_port->mc_flood = 1;
Nogah Frankel8ecd4592017-02-09 14:54:47 +01003600 mlxsw_sp_port->mc_router = 0;
3601 mlxsw_sp_port->mc_disabled = 1;
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01003602 mlxsw_sp_port->bridged = 1;
3603
3604 return 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003605}
3606
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003607static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003608{
3609 struct net_device *dev = mlxsw_sp_port->dev;
Ido Schimmel5a8f4522016-01-04 10:42:25 +01003610
Ido Schimmel28a01d22016-02-18 11:30:02 +01003611 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
3612
Ido Schimmel7117a572016-06-20 23:04:06 +02003613 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
3614
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01003615 mlxsw_sp_port->learning = 0;
3616 mlxsw_sp_port->learning_sync = 0;
3617 mlxsw_sp_port->uc_flood = 0;
Nogah Frankel71c365b2017-02-09 14:54:46 +01003618 mlxsw_sp_port->mc_flood = 0;
Nogah Frankel8ecd4592017-02-09 14:54:47 +01003619 mlxsw_sp_port->mc_router = 0;
Ido Schimmel5a8f4522016-01-04 10:42:25 +01003620 mlxsw_sp_port->bridged = 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003621
3622	/* Add an implicit VLAN interface in the device, so that untagged
3623 * packets will be classified to the default vFID.
3624 */
Ido Schimmel82e6db02016-06-20 23:04:04 +02003625 mlxsw_sp_port_add_vid(dev, 0, 1);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003626}
3627
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003628static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003629{
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003630 char sldr_pl[MLXSW_REG_SLDR_LEN];
3631
3632 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3633 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3634}
3635
3636static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3637{
3638 char sldr_pl[MLXSW_REG_SLDR_LEN];
3639
3640 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3641 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3642}
3643
3644static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3645 u16 lag_id, u8 port_index)
3646{
3647 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3648 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3649
3650 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3651 lag_id, port_index);
3652 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3653}
3654
3655static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3656 u16 lag_id)
3657{
3658 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3659 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3660
3661 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3662 lag_id);
3663 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3664}
3665
3666static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3667 u16 lag_id)
3668{
3669 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3670 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3671
3672 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3673 lag_id);
3674 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3675}
3676
3677static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3678 u16 lag_id)
3679{
3680 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3681 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3682
3683 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3684 lag_id);
3685 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3686}
3687
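/* Resolve the LAG ID of an upper device: reuse the entry already
 * associated with it, otherwise hand out the first free one. -EBUSY
 * is returned when all LAG entries are in use.
 */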
3688static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3689 struct net_device *lag_dev,
3690 u16 *p_lag_id)
3691{
3692 struct mlxsw_sp_upper *lag;
3693 int free_lag_id = -1;
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003694 u64 max_lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003695 int i;
3696
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003697 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3698 for (i = 0; i < max_lag; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003699 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3700 if (lag->ref_count) {
3701 if (lag->dev == lag_dev) {
3702 *p_lag_id = i;
3703 return 0;
3704 }
3705 } else if (free_lag_id < 0) {
3706 free_lag_id = i;
3707 }
3708 }
3709 if (free_lag_id < 0)
3710 return -EBUSY;
3711 *p_lag_id = free_lag_id;
3712 return 0;
3713}
3714
3715static bool
3716mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3717 struct net_device *lag_dev,
3718 struct netdev_lag_upper_info *lag_upper_info)
3719{
3720 u16 lag_id;
3721
3722 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3723 return false;
3724 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3725 return false;
3726 return true;
3727}
3728
3729static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3730 u16 lag_id, u8 *p_port_index)
3731{
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003732 u64 max_lag_members;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003733 int i;
3734
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003735 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3736 MAX_LAG_MEMBERS);
3737 for (i = 0; i < max_lag_members; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003738 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3739 *p_port_index = i;
3740 return 0;
3741 }
3742 }
3743 return -EBUSY;
3744}
3745
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003746static void
3747mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
Ido Schimmel186962e2017-03-10 08:53:36 +01003748 struct net_device *lag_dev, u16 lag_id)
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003749{
3750 struct mlxsw_sp_port *mlxsw_sp_vport;
Ido Schimmel11943ff2016-07-02 11:00:12 +02003751 struct mlxsw_sp_fid *f;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003752
3753 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3754 if (WARN_ON(!mlxsw_sp_vport))
3755 return;
3756
Ido Schimmel11943ff2016-07-02 11:00:12 +02003757 /* If vPort is assigned a RIF, then leave it since it's no
3758 * longer valid.
3759 */
3760 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3761 if (f)
3762 f->leave(mlxsw_sp_vport);
3763
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003764 mlxsw_sp_vport->lag_id = lag_id;
3765 mlxsw_sp_vport->lagged = 1;
Ido Schimmel186962e2017-03-10 08:53:36 +01003766 mlxsw_sp_vport->dev = lag_dev;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003767}
3768
3769static void
3770mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3771{
3772 struct mlxsw_sp_port *mlxsw_sp_vport;
Ido Schimmel11943ff2016-07-02 11:00:12 +02003773 struct mlxsw_sp_fid *f;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003774
3775 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3776 if (WARN_ON(!mlxsw_sp_vport))
3777 return;
3778
Ido Schimmel11943ff2016-07-02 11:00:12 +02003779 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3780 if (f)
3781 f->leave(mlxsw_sp_vport);
3782
Ido Schimmel186962e2017-03-10 08:53:36 +01003783 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003784 mlxsw_sp_vport->lagged = 0;
3785}
3786
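/* Joining a LAG: resolve (or create) the LAG entry, add the port as a
 * collector member, enable collection and move the PVID vPort over to
 * the LAG device.
 */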
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003787static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3788 struct net_device *lag_dev)
3789{
3790 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3791 struct mlxsw_sp_upper *lag;
3792 u16 lag_id;
3793 u8 port_index;
3794 int err;
3795
3796 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3797 if (err)
3798 return err;
3799 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3800 if (!lag->ref_count) {
3801 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3802 if (err)
3803 return err;
3804 lag->dev = lag_dev;
3805 }
3806
3807 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3808 if (err)
3809 return err;
3810 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3811 if (err)
3812 goto err_col_port_add;
3813 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3814 if (err)
3815 goto err_col_port_enable;
3816
3817 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3818 mlxsw_sp_port->local_port);
3819 mlxsw_sp_port->lag_id = lag_id;
3820 mlxsw_sp_port->lagged = 1;
3821 lag->ref_count++;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003822
Ido Schimmel186962e2017-03-10 08:53:36 +01003823 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003824
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003825 return 0;
3826
Ido Schimmel51554db2016-05-06 22:18:39 +02003827err_col_port_enable:
3828 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003829err_col_port_add:
3830 if (!lag->ref_count)
3831 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003832 return err;
3833}
3834
Ido Schimmel82e6db02016-06-20 23:04:04 +02003835static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3836 struct net_device *lag_dev)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003837{
3838 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003839 u16 lag_id = mlxsw_sp_port->lag_id;
Ido Schimmel1c800752016-06-20 23:04:20 +02003840 struct mlxsw_sp_upper *lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003841
3842 if (!mlxsw_sp_port->lagged)
Ido Schimmel82e6db02016-06-20 23:04:04 +02003843 return;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003844 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3845 WARN_ON(lag->ref_count == 0);
3846
Ido Schimmel82e6db02016-06-20 23:04:04 +02003847 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
3848 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003849
Ido Schimmel4dc236c2016-01-27 15:20:16 +01003850 if (mlxsw_sp_port->bridged) {
3851 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003852 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
Ido Schimmel4dc236c2016-01-27 15:20:16 +01003853 }
3854
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003855 if (lag->ref_count == 1)
Ido Schimmel82e6db02016-06-20 23:04:04 +02003856 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003857
3858 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3859 mlxsw_sp_port->local_port);
3860 mlxsw_sp_port->lagged = 0;
3861 lag->ref_count--;
Ido Schimmel86bf95b2016-07-02 11:00:11 +02003862
3863 mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003864}
3865
Jiri Pirko74581202015-12-03 12:12:30 +01003866static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3867 u16 lag_id)
3868{
3869 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3870 char sldr_pl[MLXSW_REG_SLDR_LEN];
3871
3872 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3873 mlxsw_sp_port->local_port);
3874 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3875}
3876
3877static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3878 u16 lag_id)
3879{
3880 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3881 char sldr_pl[MLXSW_REG_SLDR_LEN];
3882
3883 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3884 mlxsw_sp_port->local_port);
3885 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3886}
3887
3888static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3889 bool lag_tx_enabled)
3890{
3891 if (lag_tx_enabled)
3892 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3893 mlxsw_sp_port->lag_id);
3894 else
3895 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3896 mlxsw_sp_port->lag_id);
3897}
3898
3899static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3900 struct netdev_lag_lower_state_info *info)
3901{
3902 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3903}
3904
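/* A VLAN upper of a port is backed by the vPort created when the VID
 * was added. Linking and unlinking only switch the net_device the
 * vPort is associated with.
 */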
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003905static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3906 struct net_device *vlan_dev)
3907{
3908 struct mlxsw_sp_port *mlxsw_sp_vport;
3909 u16 vid = vlan_dev_vlan_id(vlan_dev);
3910
3911 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
Ido Schimmel423b9372016-06-20 23:04:03 +02003912 if (WARN_ON(!mlxsw_sp_vport))
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003913 return -EINVAL;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003914
3915 mlxsw_sp_vport->dev = vlan_dev;
3916
3917 return 0;
3918}
3919
Ido Schimmel82e6db02016-06-20 23:04:04 +02003920static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3921 struct net_device *vlan_dev)
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003922{
3923 struct mlxsw_sp_port *mlxsw_sp_vport;
3924 u16 vid = vlan_dev_vlan_id(vlan_dev);
3925
3926 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
Ido Schimmel423b9372016-06-20 23:04:03 +02003927 if (WARN_ON(!mlxsw_sp_vport))
Ido Schimmel82e6db02016-06-20 23:04:04 +02003928 return;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003929
3930 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003931}
3932
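/* Validate and reflect changes to a port's upper device. Only VLAN,
 * LAG and bridge uppers are supported; unsupported configurations are
 * vetoed in PRECHANGEUPPER.
 */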
Jiri Pirko74581202015-12-03 12:12:30 +01003933static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3934 unsigned long event, void *ptr)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003935{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003936 struct netdev_notifier_changeupper_info *info;
3937 struct mlxsw_sp_port *mlxsw_sp_port;
3938 struct net_device *upper_dev;
3939 struct mlxsw_sp *mlxsw_sp;
Ido Schimmel80bedf12016-06-20 23:03:59 +02003940 int err = 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003941
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003942 mlxsw_sp_port = netdev_priv(dev);
3943 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3944 info = ptr;
3945
3946 switch (event) {
3947 case NETDEV_PRECHANGEUPPER:
3948 upper_dev = info->upper_dev;
Ido Schimmel59fe9b32016-06-20 23:04:00 +02003949 if (!is_vlan_dev(upper_dev) &&
3950 !netif_is_lag_master(upper_dev) &&
3951 !netif_is_bridge_master(upper_dev))
3952 return -EINVAL;
Ido Schimmel6ec43902016-06-20 23:04:01 +02003953 if (!info->linking)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003954 break;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003955		/* A HW limitation forbids putting a port in multiple bridges. */
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003956 if (netif_is_bridge_master(upper_dev) &&
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003957 !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
Ido Schimmel80bedf12016-06-20 23:03:59 +02003958 return -EINVAL;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003959 if (netif_is_lag_master(upper_dev) &&
3960 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
3961 info->upper_info))
Ido Schimmel80bedf12016-06-20 23:03:59 +02003962 return -EINVAL;
Ido Schimmel6ec43902016-06-20 23:04:01 +02003963 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
3964 return -EINVAL;
3965 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
3966 !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
3967 return -EINVAL;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003968 break;
3969 case NETDEV_CHANGEUPPER:
3970 upper_dev = info->upper_dev;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003971 if (is_vlan_dev(upper_dev)) {
Ido Schimmel80bedf12016-06-20 23:03:59 +02003972 if (info->linking)
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003973 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
3974 upper_dev);
Ido Schimmel80bedf12016-06-20 23:03:59 +02003975 else
Ido Schimmel82e6db02016-06-20 23:04:04 +02003976 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
3977 upper_dev);
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003978 } else if (netif_is_bridge_master(upper_dev)) {
Ido Schimmel7117a572016-06-20 23:04:06 +02003979 if (info->linking)
3980 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
3981 upper_dev);
3982 else
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003983 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003984 } else if (netif_is_lag_master(upper_dev)) {
Ido Schimmel80bedf12016-06-20 23:03:59 +02003985 if (info->linking)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003986 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
3987 upper_dev);
Ido Schimmel80bedf12016-06-20 23:03:59 +02003988 else
Ido Schimmel82e6db02016-06-20 23:04:04 +02003989 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
3990 upper_dev);
Ido Schimmel59fe9b32016-06-20 23:04:00 +02003991 } else {
3992 err = -EINVAL;
3993 WARN_ON(1);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003994 }
3995 break;
3996 }
3997
Ido Schimmel80bedf12016-06-20 23:03:59 +02003998 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003999}
4000
Jiri Pirko74581202015-12-03 12:12:30 +01004001static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4002 unsigned long event, void *ptr)
4003{
4004 struct netdev_notifier_changelowerstate_info *info;
4005 struct mlxsw_sp_port *mlxsw_sp_port;
4006 int err;
4007
4008 mlxsw_sp_port = netdev_priv(dev);
4009 info = ptr;
4010
4011 switch (event) {
4012 case NETDEV_CHANGELOWERSTATE:
4013 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4014 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4015 info->lower_state_info);
4016 if (err)
4017 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4018 }
4019 break;
4020 }
4021
Ido Schimmel80bedf12016-06-20 23:03:59 +02004022 return 0;
Jiri Pirko74581202015-12-03 12:12:30 +01004023}
4024
4025static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
4026 unsigned long event, void *ptr)
4027{
4028 switch (event) {
4029 case NETDEV_PRECHANGEUPPER:
4030 case NETDEV_CHANGEUPPER:
4031 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
4032 case NETDEV_CHANGELOWERSTATE:
4033 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
4034 }
4035
Ido Schimmel80bedf12016-06-20 23:03:59 +02004036 return 0;
Jiri Pirko74581202015-12-03 12:12:30 +01004037}
4038
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004039static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4040 unsigned long event, void *ptr)
4041{
4042 struct net_device *dev;
4043 struct list_head *iter;
4044 int ret;
4045
4046 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4047 if (mlxsw_sp_port_dev_check(dev)) {
4048 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004049 if (ret)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004050 return ret;
4051 }
4052 }
4053
Ido Schimmel80bedf12016-06-20 23:03:59 +02004054 return 0;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004055}
4056
Ido Schimmel701b1862016-07-04 08:23:16 +02004057static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
4058 struct net_device *vlan_dev)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004059{
Ido Schimmel701b1862016-07-04 08:23:16 +02004060 u16 fid = vlan_dev_vlan_id(vlan_dev);
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004061 struct mlxsw_sp_fid *f;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004062
Ido Schimmel701b1862016-07-04 08:23:16 +02004063 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4064 if (!f) {
4065 f = mlxsw_sp_fid_create(mlxsw_sp, fid);
4066 if (IS_ERR(f))
4067 return PTR_ERR(f);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004068 }
4069
Ido Schimmel701b1862016-07-04 08:23:16 +02004070 f->ref_count++;
4071
4072 return 0;
4073}
4074
4075static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
4076 struct net_device *vlan_dev)
4077{
4078 u16 fid = vlan_dev_vlan_id(vlan_dev);
4079 struct mlxsw_sp_fid *f;
4080
4081 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
Ido Schimmel99f44bb2016-07-04 08:23:17 +02004082 if (f && f->r)
4083 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
Ido Schimmel701b1862016-07-04 08:23:16 +02004084 if (f && --f->ref_count == 0)
4085 mlxsw_sp_fid_destroy(mlxsw_sp, f);
4086}
4087
4088static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4089 unsigned long event, void *ptr)
4090{
4091 struct netdev_notifier_changeupper_info *info;
4092 struct net_device *upper_dev;
4093 struct mlxsw_sp *mlxsw_sp;
Ido Schimmelb4149702017-03-10 08:53:34 +01004094 int err = 0;
Ido Schimmel701b1862016-07-04 08:23:16 +02004095
4096 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4097 if (!mlxsw_sp)
4098 return 0;
Ido Schimmel701b1862016-07-04 08:23:16 +02004099
4100 info = ptr;
4101
4102 switch (event) {
Ido Schimmelb4149702017-03-10 08:53:34 +01004103 case NETDEV_PRECHANGEUPPER:
Ido Schimmel701b1862016-07-04 08:23:16 +02004104 upper_dev = info->upper_dev;
4105 if (!is_vlan_dev(upper_dev))
Ido Schimmelb4149702017-03-10 08:53:34 +01004106 return -EINVAL;
4107 if (is_vlan_dev(upper_dev) &&
4108 br_dev != mlxsw_sp->master_bridge.dev)
4109 return -EINVAL;
4110 break;
4111 case NETDEV_CHANGEUPPER:
4112 upper_dev = info->upper_dev;
4113 if (is_vlan_dev(upper_dev)) {
4114 if (info->linking)
4115 err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
4116 upper_dev);
4117 else
4118 mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
4119 upper_dev);
Ido Schimmel701b1862016-07-04 08:23:16 +02004120 } else {
Ido Schimmelb4149702017-03-10 08:53:34 +01004121 err = -EINVAL;
4122 WARN_ON(1);
Ido Schimmel701b1862016-07-04 08:23:16 +02004123 }
4124 break;
4125 }
4126
Ido Schimmelb4149702017-03-10 08:53:34 +01004127 return err;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004128}
4129
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004130static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004131{
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004132 return find_first_zero_bit(mlxsw_sp->vfids.mapped,
Ido Schimmel99724c12016-07-04 08:23:14 +02004133 MLXSW_SP_VFID_MAX);
4134}
4135
4136static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
4137{
4138 char sfmr_pl[MLXSW_REG_SFMR_LEN];
4139
4140 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
4141 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004142}
4143
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004144static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
Ido Schimmel1c800752016-06-20 23:04:20 +02004145
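/* vFIDs back VLAN-unaware bridges. Creating one allocates a free vFID
 * from the bitmap, instantiates the corresponding FID in the device
 * and adds it to the driver's vFID list.
 */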
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004146static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
4147 struct net_device *br_dev)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004148{
4149 struct device *dev = mlxsw_sp->bus_info->dev;
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004150 struct mlxsw_sp_fid *f;
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004151 u16 vfid, fid;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004152 int err;
4153
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004154 vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004155 if (vfid == MLXSW_SP_VFID_MAX) {
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004156 dev_err(dev, "No available vFIDs\n");
4157 return ERR_PTR(-ERANGE);
4158 }
4159
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004160 fid = mlxsw_sp_vfid_to_fid(vfid);
4161 err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004162 if (err) {
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004163 dev_err(dev, "Failed to create FID=%d\n", fid);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004164 return ERR_PTR(err);
4165 }
4166
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004167 f = kzalloc(sizeof(*f), GFP_KERNEL);
4168 if (!f)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004169 goto err_allocate_vfid;
4170
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004171 f->leave = mlxsw_sp_vport_vfid_leave;
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004172 f->fid = fid;
4173 f->dev = br_dev;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004174
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004175 list_add(&f->list, &mlxsw_sp->vfids.list);
4176 set_bit(vfid, mlxsw_sp->vfids.mapped);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004177
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004178 return f;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004179
4180err_allocate_vfid:
Ido Schimmelc7e920b2016-06-20 23:04:09 +02004181 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004182 return ERR_PTR(-ENOMEM);
4183}
4184
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004185static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
4186 struct mlxsw_sp_fid *f)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004187{
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004188 u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
Ido Schimmel99f44bb2016-07-04 08:23:17 +02004189 u16 fid = f->fid;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004190
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004191 clear_bit(vfid, mlxsw_sp->vfids.mapped);
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004192 list_del(&f->list);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004193
Ido Schimmel99f44bb2016-07-04 08:23:17 +02004194 if (f->r)
4195 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004196
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004197 kfree(f);
Ido Schimmel99f44bb2016-07-04 08:23:17 +02004198
4199 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004200}
4201
Ido Schimmel99724c12016-07-04 08:23:14 +02004202static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
4203 bool valid)
4204{
4205 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
4206 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4207
4208 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
4209 vid);
4210}
4211
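/* Joining a VLAN-unaware bridge: find or create the vFID backing the
 * bridge device, enable flooding for the vPort in that FID and map
 * the {Port, VID} pair to it.
 */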
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004212static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4213 struct net_device *br_dev)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004214{
Ido Schimmel0355b592016-06-20 23:04:13 +02004215 struct mlxsw_sp_fid *f;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004216 int err;
4217
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004218 f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
Ido Schimmel0355b592016-06-20 23:04:13 +02004219 if (!f) {
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004220 f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
Ido Schimmel0355b592016-06-20 23:04:13 +02004221 if (IS_ERR(f))
4222 return PTR_ERR(f);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004223 }
4224
Ido Schimmel0355b592016-06-20 23:04:13 +02004225 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
4226 if (err)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004227 goto err_vport_flood_set;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004228
Ido Schimmel0355b592016-06-20 23:04:13 +02004229 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
4230 if (err)
4231 goto err_vport_fid_map;
Ido Schimmel6a9863a2016-02-15 13:19:54 +01004232
Ido Schimmel41b996c2016-06-20 23:04:17 +02004233 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
Ido Schimmel0355b592016-06-20 23:04:13 +02004234 f->ref_count++;
Ido Schimmel039c49a2016-01-27 15:20:18 +01004235
Ido Schimmel22305372016-06-20 23:04:21 +02004236 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
4237
Ido Schimmel0355b592016-06-20 23:04:13 +02004238 return 0;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004239
Ido Schimmel9c4d4422016-06-20 23:04:10 +02004240err_vport_fid_map:
Ido Schimmel0355b592016-06-20 23:04:13 +02004241 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4242err_vport_flood_set:
4243 if (!f->ref_count)
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004244 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
Ido Schimmel0355b592016-06-20 23:04:13 +02004245 return err;
4246}
4247
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

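/* Enslave a vPort to a VLAN-unaware bridge: detach from the FID it is
 * currently attached to (if any), join the bridge's vFID, enable learning
 * for the VID and record the bridge port attributes.
 */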
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->mc_flood = 1;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->mc_disabled = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

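/* Reverse of mlxsw_sp_vport_bridge_join(): disable learning, leave the
 * bridge's vFID and clear the bridge port attributes.
 */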
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->mc_flood = 0;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->bridged = 0;
}

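/* Return true if none of the port's other vPorts is already a member of
 * 'br_dev', i.e. the port may add another VLAN upper to that bridge.
 */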
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}

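/* Handle changeupper events for a VLAN upper of a port: validate the
 * requested topology on PRECHANGEUPPER and join or leave the bridge on
 * CHANGEUPPER.
 */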
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
								 upper_dev);
			else
				mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

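/* A VLAN upper of a LAG device affects every physical port member of the
 * LAG, so propagate the event to each lower mlxsw port.
 */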
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

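/* Dispatch events for a VLAN device according to its real device: either a
 * physical mlxsw port or a LAG device.
 */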
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

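/* Top-level netdevice notifier: route MTU and address changes to the
 * router and other events to the handler matching the device type
 * (physical port, LAG, bridge or VLAN device).
 */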
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

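/* The notifier blocks are registered before the core and PCI drivers so
 * they are already in place when devices are probed; module exit and the
 * init error path unwind in the reverse order.
 */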
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);