blob: c440b351a0e4206f62a8d6cd846b102134c3b229 [file] [log] [blame]
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
Jiri Pirko22a67762017-02-03 10:29:07 +01003 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
Jiri Pirko56ade8f2015-10-16 14:01:37 +02005 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/types.h>
Jiri Pirko1d20d232016-10-27 15:12:59 +020040#include <linux/pci.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020041#include <linux/netdevice.h>
42#include <linux/etherdevice.h>
43#include <linux/ethtool.h>
44#include <linux/slab.h>
45#include <linux/device.h>
46#include <linux/skbuff.h>
47#include <linux/if_vlan.h>
48#include <linux/if_bridge.h>
49#include <linux/workqueue.h>
50#include <linux/jiffies.h>
51#include <linux/bitops.h>
Ido Schimmel7f71eb42015-12-15 16:03:37 +010052#include <linux/list.h>
Ido Schimmel80bedf12016-06-20 23:03:59 +020053#include <linux/notifier.h>
Ido Schimmel90183b92016-04-06 17:10:08 +020054#include <linux/dcbnl.h>
Ido Schimmel99724c12016-07-04 08:23:14 +020055#include <linux/inetdevice.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020056#include <net/switchdev.h>
Yotam Gigi763b4b72016-07-21 12:03:17 +020057#include <net/pkt_cls.h>
58#include <net/tc_act/tc_mirred.h>
Jiri Pirkoe7322632016-09-01 10:37:43 +020059#include <net/netevent.h>
Yotam Gigi98d0f7b2017-01-23 11:07:11 +010060#include <net/tc_act/tc_sample.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020061
62#include "spectrum.h"
Jiri Pirko1d20d232016-10-27 15:12:59 +020063#include "pci.h"
Jiri Pirko56ade8f2015-10-16 14:01:37 +020064#include "core.h"
65#include "reg.h"
66#include "port.h"
67#include "trap.h"
68#include "txheader.h"
Arkadi Sharshevskyff7b0d22017-03-11 09:42:51 +010069#include "spectrum_cnt.h"
Jiri Pirko56ade8f2015-10-16 14:01:37 +020070
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Tx header field definitions. The Tx header is prepended to every
 * packet handed to the device; see mlxsw_sp_txhdr_construct() below.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
141
/* Read a flow counter's packet and byte values without modifying them
 * (MGPC query with the NOP opcode).
 * Returns 0 on success or a negative errno from the register query.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}
158
/* Reset a flow counter to zero via an MGPC clear operation. */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}
168
/* Allocate a flow counter from the flow sub-pool and clear it so the
 * caller starts counting from zero. If the clear fails, the counter is
 * returned to the pool before reporting the error.
 */
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}
188
/* Return a previously allocated flow counter to the flow sub-pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
195
/* Prepend and fill the Tx header on an skb. Everything sent through
 * this path is stamped as an Ethernet control packet directed at
 * tx_info->local_port, using the control traffic class.
 * Assumes the skb has MLXSW_TXHDR_LEN bytes of headroom; the xmit path
 * guarantees this via skb_realloc_headroom().
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
211
/* Query the device base MAC address (SPAD register) and cache it in
 * mlxsw_sp->base_mac; per-port addresses are derived from it later in
 * mlxsw_sp_port_dev_addr_init().
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
223
/* Allocate the SPAN (port mirroring) entries table, sized according to
 * the device's MAX_SPAN resource.
 * Returns -EIO when the resource is not reported as valid, -ENOMEM on
 * allocation failure.
 */
static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}
244
245static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
246{
247 int i;
248
249 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
250 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
251
252 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
253 }
254 kfree(mlxsw_sp->span.entries);
255}
256
257static struct mlxsw_sp_span_entry *
258mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
259{
260 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
261 struct mlxsw_sp_span_entry *span_entry;
262 char mpat_pl[MLXSW_REG_MPAT_LEN];
263 u8 local_port = port->local_port;
264 int index;
265 int i;
266 int err;
267
268 /* find a free entry to use */
269 index = -1;
270 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
271 if (!mlxsw_sp->span.entries[i].used) {
272 index = i;
273 span_entry = &mlxsw_sp->span.entries[i];
274 break;
275 }
276 }
277 if (index < 0)
278 return NULL;
279
280 /* create a new port analayzer entry for local_port */
281 mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
282 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
283 if (err)
284 return NULL;
285
286 span_entry->used = true;
287 span_entry->id = index;
Yotam Gigi2d644d42016-11-11 16:34:25 +0100288 span_entry->ref_count = 1;
Yotam Gigi763b4b72016-07-21 12:03:17 +0200289 span_entry->local_port = local_port;
290 return span_entry;
291}
292
/* Remove the port analyzer entry from hardware (MPAT with valid=false)
 * and mark the SPAN table slot as free. The register write result is
 * intentionally ignored; the slot is reclaimed either way.
 */
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}
304
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200305static struct mlxsw_sp_span_entry *
306mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
Yotam Gigi763b4b72016-07-21 12:03:17 +0200307{
308 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
309 int i;
310
311 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
312 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
313
314 if (curr->used && curr->local_port == port->local_port)
315 return curr;
316 }
317 return NULL;
318}
319
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200320static struct mlxsw_sp_span_entry
321*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
Yotam Gigi763b4b72016-07-21 12:03:17 +0200322{
323 struct mlxsw_sp_span_entry *span_entry;
324
325 span_entry = mlxsw_sp_span_entry_find(port);
326 if (span_entry) {
Yotam Gigi2d644d42016-11-11 16:34:25 +0100327 /* Already exists, just take a reference */
Yotam Gigi763b4b72016-07-21 12:03:17 +0200328 span_entry->ref_count++;
329 return span_entry;
330 }
331
332 return mlxsw_sp_span_entry_create(port);
333}
334
335static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
336 struct mlxsw_sp_span_entry *span_entry)
337{
Yotam Gigi2d644d42016-11-11 16:34:25 +0100338 WARN_ON(!span_entry->ref_count);
Yotam Gigi763b4b72016-07-21 12:03:17 +0200339 if (--span_entry->ref_count == 0)
340 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
341 return 0;
342}
343
344static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
345{
346 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
347 struct mlxsw_sp_span_inspected_port *p;
348 int i;
349
350 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
351 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
352
353 list_for_each_entry(p, &curr->bound_ports_list, list)
354 if (p->local_port == port->local_port &&
355 p->type == MLXSW_SP_SPAN_EGRESS)
356 return true;
357 }
358
359 return false;
360}
361
/* Size of the egress mirroring (SBIB) buffer for a given MTU, in cells:
 * 2.5 times the MTU plus one extra cell.
 */
static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}
366
/* Resize the port's SBIB mirroring buffer after an MTU change. Only
 * needed when the port is the target of an egress mirror; other ports
 * have no SBIB buffer bound (see mlxsw_sp_span_inspected_port_bind()).
 */
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}
388
389static struct mlxsw_sp_span_inspected_port *
390mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
391 struct mlxsw_sp_span_entry *span_entry)
392{
393 struct mlxsw_sp_span_inspected_port *p;
394
395 list_for_each_entry(p, &span_entry->bound_ports_list, list)
396 if (port->local_port == p->local_port)
397 return p;
398 return NULL;
399}
400
/* Bind @port as an inspected (mirrored) port to a SPAN entry.
 * For egress SPAN, an SBIB shared buffer sized by the port's current
 * MTU is set up first. Failures after that point roll the buffer back
 * through the shared error path below.
 */
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	/* roll back the egress shared buffer allocation, if any */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}
450
/* Undo mlxsw_sp_span_inspected_port_bind(): remove the port from the
 * analyzer (MPAR), release the egress SBIB buffer if one was bound,
 * drop the reference on the SPAN entry and free the bookkeeping node.
 * Silently returns if the port is not bound to this entry.
 */
static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}
482
/* Mirror traffic of type @type from port @from to analyzer port @to.
 * Takes a reference on @to's SPAN entry (creating it if needed); the
 * reference is dropped again should binding the inspected port fail.
 */
static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}
508
/* Stop mirroring traffic of type @type from port @from to analyzer
 * port @to. The SPAN entry reference taken in mlxsw_sp_span_mirror_add()
 * is released inside the unbind.
 */
static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}
525
/* Enable or disable packet sampling on a port via the MPSC register.
 * NOTE(review): @rate is passed straight to MPSC -- presumably a 1-in-N
 * sampling rate; confirm against the register documentation.
 */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}
535
/* Set the port's administrative status (up/down) via the PAOS register. */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
547
/* Program the port's unicast MAC address into hardware via the PPAD
 * register.
 */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
558
/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last byte, then program it into hardware.
 * NOTE(review): the addition can wrap the last byte for large port
 * numbers -- assumed non-colliding by base-MAC allocation; verify.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
568
/* Program the port MTU (PMTU register). The Tx header and Ethernet
 * header lengths are added on top of the requested value, and the
 * result is validated against the maximum reported by a PMTU query.
 * Returns -EINVAL when the adjusted MTU exceeds the port's maximum.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	/* Packing with MTU 0 queries the maximum supported value. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
589
/* Assign @local_port to switch partition @swid via the PSPA register. */
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
598
/* Set the switch partition of a port; thin wrapper around
 * __mlxsw_sp_port_swid_set().
 */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}
606
/* Enable or disable virtual-port mode for the port via the SVPE
 * register.
 */
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
616
/* Install or remove (@valid) a {port,VID}-to-FID mapping of type @mt
 * for the port via the SVFA register.
 */
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
628
/* Set the FDB learning state for the VID range [vid_begin, vid_end] on
 * a port via the SPVMLR register. The register payload is allocated on
 * the heap rather than the stack, unlike the smaller payloads used
 * elsewhere in this file.
 */
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
			      vid_end, learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
646
/* Set the learning state for a single VID; wrapper around the
 * range-based __mlxsw_sp_port_vid_learning_set().
 */
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						learn_enable);
}
653
/* Create the system-port to local-port mapping for the port via the
 * SSPR register.
 */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
663
/* Query a port's module mapping (PMLP register): module number, lane
 * width and Tx lane. Module and lane are read from index 0, i.e. the
 * first mapped lane.
 */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
680
/* Map a local port to @width consecutive lanes of @module, starting at
 * @lane. The same lane index is used for both Rx and Tx.
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
696
/* Unmap a local port from its module by programming a lane width of
 * zero.
 */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
705
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200706static int mlxsw_sp_port_open(struct net_device *dev)
707{
708 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
709 int err;
710
711 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
712 if (err)
713 return err;
714 netif_start_queue(dev);
715 return 0;
716}
717
718static int mlxsw_sp_port_stop(struct net_device *dev)
719{
720 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
721
722 netif_stop_queue(dev);
723 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
724}
725
/* ndo_start_xmit: transmit a packet through the switch CPU interface.
 * Reports NETDEV_TX_BUSY when the core Tx path is momentarily full,
 * guarantees MLXSW_TXHDR_LEN bytes of headroom (reallocating the skb
 * if needed), pads short frames, builds the Tx header and hands the
 * skb to the core. On success, per-CPU packet/byte stats are updated;
 * the Tx header bytes are excluded from the byte count since hardware
 * consumes the header. Dropped packets are counted in tx_dropped.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
782
/* ndo_set_rx_mode callback. Deliberately a no-op: no device programming
 * is performed when the netdev's Rx mode changes.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
786
/* ndo_set_mac_address: validate the new address, program it into
 * hardware first, and only update the netdev copy once the hardware
 * write has succeeded.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
802
/* Threshold of a priority group buffer, in cells: twice the MTU. */
static u16 mlxsw_sp_pg_buf_threshold_get(int mtu)
{
	return 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
}
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200807
#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */
/* Convert a PFC delay allowance (@delay, in bits) into a delay
 * provision in cells, adding one MTU worth of cells on top.
 */
static u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
{
	delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
}
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200814
Ido Schimmelf417f042017-03-24 08:02:50 +0100815/* Maximum delay buffer needed in case of PAUSE frames, in cells.
816 * Assumes 100m cable and maximum MTU.
817 */
818#define MLXSW_SP_PAUSE_DELAY 612
819static u16 mlxsw_sp_pg_buf_delay_get(int mtu, u16 delay, bool pfc, bool pause)
820{
821 if (pfc)
822 return mlxsw_sp_pfc_delay_get(mtu, delay);
823 else if (pause)
824 return MLXSW_SP_PAUSE_DELAY;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200825 else
Ido Schimmelf417f042017-03-24 08:02:50 +0100826 return 0;
827}
828
/* Pack one priority group buffer into the PBMC payload: lossy buffers
 * carry only a size, lossless ones also the threshold @thres.
 */
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}
838
/* Configure the port's priority group buffers (PBMC register) for the
 * given MTU and priority-to-PG mapping @prio_tc. A PG buffer is only
 * (re)configured when at least one priority maps to it. The buffer is
 * lossless when PFC is enabled for a mapped priority or global pause
 * (@pause_en) is on; lossless buffers get a delay provision on top of
 * the threshold. Returns 0 or a negative errno from register access.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read the current buffer configuration as the baseline. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		/* Configure PG i only if some priority maps to it; PFC
		 * applies when it is enabled for any such priority.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mtu, delay, pfc, pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
879
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200880static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200881 int mtu, bool pause_en)
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200882{
883 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
884 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200885 struct ieee_pfc *my_pfc;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200886 u8 *prio_tc;
887
888 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200889 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200890
Ido Schimmel9f7ec052016-04-06 17:10:14 +0200891 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
Ido Schimmeld81a6bd2016-04-06 17:10:16 +0200892 pause_en, my_pfc);
Ido Schimmel8e8dfe92016-04-06 17:10:10 +0200893}
894
/* ndo_change_mtu callback. The port headroom and the SPAN (mirroring)
 * buffer must be resized for the new MTU before the MTU itself is set
 * in hardware; on failure each completed step is rolled back using the
 * still-unchanged dev->mtu.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Restore the previous sizing; dev->mtu was not updated yet. */
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
919
/* Accumulate the per-CPU software counters into @stats. Reader side of
 * the u64_stats seqcount: the snapshot of each CPU's counters is
 * retried if a writer interleaved with the read.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
951
Or Gerlitz3df5b3c2016-11-22 23:09:54 +0200952static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
Nogah Frankelfc1bbb02016-09-16 15:05:38 +0200953{
954 switch (attr_id) {
955 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
956 return true;
957 }
958
959 return false;
960}
961
Or Gerlitz4bdcc6c2016-09-20 08:14:08 +0300962static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
963 void *sp)
Nogah Frankelfc1bbb02016-09-16 15:05:38 +0200964{
965 switch (attr_id) {
966 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
967 return mlxsw_sp_port_get_sw_stats64(dev, sp);
968 }
969
970 return -EINVAL;
971}
972
973static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
974 int prio, char *ppcnt_pl)
975{
976 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
977 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
978
979 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
980 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
981}
982
/* Read the IEEE 802.3 counter group from hardware and translate it
 * into rtnl_link_stats64 fields. rx_length_errors and rx_errors are
 * derived sums of the individual hardware counters.
 */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	/* Aggregate all length-related error counters. */
	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}
1021
1022static void update_stats_cache(struct work_struct *work)
1023{
1024 struct mlxsw_sp_port *mlxsw_sp_port =
1025 container_of(work, struct mlxsw_sp_port,
1026 hw_stats.update_dw.work);
1027
1028 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1029 goto out;
1030
1031 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1032 mlxsw_sp_port->hw_stats.cache);
1033
1034out:
1035 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1036 MLXSW_HW_STATS_UPDATE_TIME);
1037}
1038
1039/* Return the stats from a cache that is updated periodically,
1040 * as this function might get called in an atomic context.
1041 */
stephen hemmingerbc1f4472017-01-06 19:12:52 -08001042static void
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001043mlxsw_sp_port_get_stats64(struct net_device *dev,
1044 struct rtnl_link_stats64 *stats)
1045{
1046 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1047
1048 memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001049}
1050
1051int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1052 u16 vid_end, bool is_member, bool untagged)
1053{
1054 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1055 char *spvm_pl;
1056 int err;
1057
1058 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1059 if (!spvm_pl)
1060 return -ENOMEM;
1061
1062 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1063 vid_end, is_member, untagged);
1064 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1065 kfree(spvm_pl);
1066 return err;
1067}
1068
/* Transition the port to Virtual mode: install an explicit
 * {Port, VID} to FID mapping for every active VLAN, then flip the
 * port mode. On failure all mappings installed so far are removed;
 * last_visited_vid bounds the rollback loop.
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			/* The failing VID itself was not mapped. */
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		/* All VIDs were mapped; unwind the whole range. */
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
1098
1099static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
1100{
1101 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
1102 u16 vid;
1103 int err;
1104
1105 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
1106 if (err)
1107 return err;
1108
1109 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1110 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
1111 vid, vid);
1112 if (err)
1113 return err;
1114 }
1115
1116 return 0;
1117}
1118
/* Allocate a vPort representing VLAN @vid on @mlxsw_sp_port, copy the
 * relevant state from the parent port and link it into the parent's
 * vports list. Returns NULL on allocation failure.
 */
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	/* LAG state is inherited from the underlying physical port. */
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}
1144
1145static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
1146{
1147 list_del(&mlxsw_sp_vport->vport.list);
1148 kfree(mlxsw_sp_vport);
1149}
1150
/* ndo_vlan_rx_add_vid: create a vPort backing @vid on this port.
 * Returns 0 if the VID is already present or reserved (VID 0).
 */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	/* Only the PVID (VID 1) egresses untagged. */
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	/* Undo the mode transition only if this was the first vPort. */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}
1195
/* ndo_vlan_rx_kill_vid: tear down the vPort backing @vid on this
 * port, dropping its FID reference and reverting to VLAN mode when
 * the last vPort goes away.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}
1233
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001234static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1235 size_t len)
1236{
1237 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
Ido Schimmeld664b412016-06-09 09:51:40 +02001238 u8 module = mlxsw_sp_port->mapping.module;
1239 u8 width = mlxsw_sp_port->mapping.width;
1240 u8 lane = mlxsw_sp_port->mapping.lane;
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001241 int err;
1242
Ido Schimmel2bf9a582016-04-05 10:20:04 +02001243 if (!mlxsw_sp_port->split)
1244 err = snprintf(name, len, "p%d", module + 1);
1245 else
1246 err = snprintf(name, len, "p%ds%d", module + 1,
1247 lane / width);
1248
1249 if (err >= len)
1250 return -EINVAL;
1251
1252 return 0;
1253}
1254
Yotam Gigi763b4b72016-07-21 12:03:17 +02001255static struct mlxsw_sp_port_mall_tc_entry *
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001256mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1257 unsigned long cookie) {
Yotam Gigi763b4b72016-07-21 12:03:17 +02001258 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1259
1260 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1261 if (mall_tc_entry->cookie == cookie)
1262 return mall_tc_entry;
1263
1264 return NULL;
1265}
1266
/* Offload a matchall mirror action: resolve the mirred target netdev,
 * verify it is a spectrum port, record it in @mirror and set up the
 * SPAN session in hardware.
 */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	/* Mirroring is only possible between ports of this ASIC. */
	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	/* Remember the target so the session can be torn down later. */
	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}
Yotam Gigi763b4b72016-07-21 12:03:17 +02001297
Yotam Gigi65acb5d2017-01-09 11:25:46 +01001298static void
1299mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1300 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1301{
1302 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1303 enum mlxsw_sp_span_type span_type;
1304 struct mlxsw_sp_port *to_port;
1305
1306 to_port = mlxsw_sp->ports[mirror->to_local_port];
1307 span_type = mirror->ingress ?
1308 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1309 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
Yotam Gigi763b4b72016-07-21 12:03:17 +02001310}
1311
/* Offload a matchall sample action. Only one sampler per port is
 * allowed and the rate must fit the MPSC register. The psample group
 * pointer is published under RCU before the hardware is enabled, and
 * cleared again if enabling fails.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	/* Record the sampling parameters before enabling HW sampling. */
	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}
1346
1347static void
1348mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1349{
1350 if (!mlxsw_sp_port->sample)
1351 return;
1352
1353 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1354 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1355}
1356
/* Install a matchall classifier offload. Exactly one action is
 * supported: either an egress-mirror or a sample action, and only for
 * the "all protocols" match. The entry is tracked on the port's
 * mall_tc_list keyed by the filter cookie.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = cls->cookie;

	/* Single action was verified above; take the first one. */
	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}
1405
/* Remove a matchall classifier offload previously installed via
 * mlxsw_sp_port_add_cls_matchall, looked up by the filter cookie.
 * Silently ignores unknown cookies (debug log only).
 */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	/* Undo the type-specific hardware configuration. */
	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
1433
/* ndo_setup_tc: dispatch matchall and flower classifier offload
 * commands. The qdisc handle's major number distinguishes ingress
 * from egress attachment.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	switch (tc->type) {
	case TC_SETUP_MATCHALL:
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
						       proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
						tc->cls_flower);
			return 0;
		case TC_CLSFLOWER_STATS:
			return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
						     tc->cls_flower);
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}
1474
/* Netdev callbacks for spectrum ports. FDB and bridge operations are
 * delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
1496
1497static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1498 struct ethtool_drvinfo *drvinfo)
1499{
1500 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1501 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1502
1503 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1504 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1505 sizeof(drvinfo->version));
1506 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1507 "%d.%d.%d",
1508 mlxsw_sp->bus_info->fw_rev.major,
1509 mlxsw_sp->bus_info->fw_rev.minor,
1510 mlxsw_sp->bus_info->fw_rev.subminor);
1511 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1512 sizeof(drvinfo->bus_info));
1513}
1514
Ido Schimmel9f7ec052016-04-06 17:10:14 +02001515static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1516 struct ethtool_pauseparam *pause)
1517{
1518 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1519
1520 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1521 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1522}
1523
1524static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1525 struct ethtool_pauseparam *pause)
1526{
1527 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1528
1529 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1530 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1531 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1532
1533 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1534 pfcc_pl);
1535}
1536
/* ethtool .set_pauseparam: PAUSE is mutually exclusive with PFC and
 * autonegotiated PAUSE is not supported. The headroom must be resized
 * for lossless operation before PAUSE is enabled in hardware; on
 * failure the headroom is restored from the previous PAUSE state.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	/* Size the buffers for the new PAUSE state first. */
	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	/* Cache the state for .get_pauseparam and headroom updates. */
	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1576
/* Descriptor of one ethtool hardware counter: its string name and a
 * getter extracting the value from a fetched PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};
1581
/* IEEE 802.3 counter group exposed through ethtool -S; each entry
 * maps a counter name to its PPCNT payload getter.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1662
/* Per-priority counter group; each entry is emitted once per priority
 * (IEEE_8021QAZ_MAX_TCS instances), suffixed with the priority number.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1699
Jiri Pirko412791d2016-10-21 16:07:19 +02001700static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001701{
1702 u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1703
1704 return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
1705}
1706
/* Per-traffic-class counters from the PPCNT TC group. The TC number is
 * appended to each string at dump time by mlxsw_sp_port_get_tc_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total ethtool stats: one IEEE 802.3 set, plus a per-prio and a per-TC
 * set for each of the eight traffic classes.
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
1724
1725static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1726{
1727 int i;
1728
1729 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1730 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1731 mlxsw_sp_port_hw_prio_stats[i].str, prio);
1732 *p += ETH_GSTRING_LEN;
1733 }
1734}
1735
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001736static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1737{
1738 int i;
1739
1740 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1741 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1742 mlxsw_sp_port_hw_tc_stats[i].str, tc);
1743 *p += ETH_GSTRING_LEN;
1744 }
1745}
1746
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001747static void mlxsw_sp_port_get_strings(struct net_device *dev,
1748 u32 stringset, u8 *data)
1749{
1750 u8 *p = data;
1751 int i;
1752
1753 switch (stringset) {
1754 case ETH_SS_STATS:
1755 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1756 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1757 ETH_GSTRING_LEN);
1758 p += ETH_GSTRING_LEN;
1759 }
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001760
1761 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1762 mlxsw_sp_port_get_prio_strings(&p, i);
1763
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001764 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1765 mlxsw_sp_port_get_tc_strings(&p, i);
1766
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001767 break;
1768 }
1769}
1770
Ido Schimmel3a66ee32015-11-27 13:45:55 +01001771static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1772 enum ethtool_phys_id_state state)
1773{
1774 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1775 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1776 char mlcr_pl[MLXSW_REG_MLCR_LEN];
1777 bool active;
1778
1779 switch (state) {
1780 case ETHTOOL_ID_ACTIVE:
1781 active = true;
1782 break;
1783 case ETHTOOL_ID_INACTIVE:
1784 active = false;
1785 break;
1786 default:
1787 return -EOPNOTSUPP;
1788 }
1789
1790 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1791 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1792}
1793
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001794static int
1795mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
1796 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
1797{
1798 switch (grp) {
1799 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
1800 *p_hw_stats = mlxsw_sp_port_hw_stats;
1801 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
1802 break;
1803 case MLXSW_REG_PPCNT_PRIO_CNT:
1804 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
1805 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1806 break;
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001807 case MLXSW_REG_PPCNT_TC_CNT:
1808 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
1809 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
1810 break;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001811 default:
1812 WARN_ON(1);
Yotam Gigie915ac62017-01-09 11:25:48 +01001813 return -EOPNOTSUPP;
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001814 }
1815 return 0;
1816}
1817
1818static void __mlxsw_sp_port_get_stats(struct net_device *dev,
1819 enum mlxsw_reg_ppcnt_grp grp, int prio,
1820 u64 *data, int data_index)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001821{
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001822 struct mlxsw_sp_port_hw_stats *hw_stats;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001823 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001824 int i, len;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001825 int err;
1826
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001827 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
1828 if (err)
1829 return;
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02001830 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001831 for (i = 0; i < len; i++)
Colin Ian Kingfaac0ff2016-09-23 12:02:45 +01001832 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001833}
1834
1835static void mlxsw_sp_port_get_stats(struct net_device *dev,
1836 struct ethtool_stats *stats, u64 *data)
1837{
1838 int i, data_index = 0;
1839
1840 /* IEEE 802.3 Counters */
1841 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
1842 data, data_index);
1843 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
1844
1845 /* Per-Priority Counters */
1846 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1847 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
1848 data, data_index);
1849 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1850 }
Ido Schimmeldf4750e2016-07-19 15:35:54 +02001851
1852 /* Per-TC Counters */
1853 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1854 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
1855 data, data_index);
1856 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
1857 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001858}
1859
1860static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1861{
1862 switch (sset) {
1863 case ETH_SS_STATS:
Ido Schimmel7ed674b2016-07-19 15:35:53 +02001864 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001865 default:
1866 return -EOPNOTSUPP;
1867 }
1868}
1869
/* Mapping between the device's PTYS speed capability bit(s), the
 * corresponding ethtool link mode bit and the numeric speed.
 */
struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool; /* ethtool mode bit */
	u32 mask;	/* PTYS eth_proto bit(s) for this mode */
	u32 speed;	/* speed in Mb/s (SPEED_* value) */
};
1875
1876static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1877 {
1878 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001879 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1880 .speed = SPEED_100,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001881 },
1882 {
1883 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1884 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001885 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1886 .speed = SPEED_1000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001887 },
1888 {
1889 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001890 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1891 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001892 },
1893 {
1894 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1895 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001896 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1897 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001898 },
1899 {
1900 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1901 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1902 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1903 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001904 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1905 .speed = SPEED_10000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001906 },
1907 {
1908 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001909 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
1910 .speed = SPEED_20000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001911 },
1912 {
1913 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001914 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1915 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001916 },
1917 {
1918 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001919 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1920 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001921 },
1922 {
1923 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001924 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1925 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001926 },
1927 {
1928 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001929 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1930 .speed = SPEED_40000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001931 },
1932 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001933 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
1934 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1935 .speed = SPEED_25000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001936 },
1937 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001938 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
1939 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1940 .speed = SPEED_25000,
1941 },
1942 {
1943 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1944 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1945 .speed = SPEED_25000,
1946 },
1947 {
1948 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1949 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1950 .speed = SPEED_25000,
1951 },
1952 {
1953 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
1954 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1955 .speed = SPEED_50000,
1956 },
1957 {
1958 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1959 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1960 .speed = SPEED_50000,
1961 },
1962 {
1963 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
1964 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1965 .speed = SPEED_50000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001966 },
1967 {
1968 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001969 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
1970 .speed = SPEED_56000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001971 },
1972 {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02001973 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1974 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
1975 .speed = SPEED_56000,
1976 },
1977 {
1978 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1979 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
1980 .speed = SPEED_56000,
1981 },
1982 {
1983 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1984 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
1985 .speed = SPEED_56000,
1986 },
1987 {
1988 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
1989 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1990 .speed = SPEED_100000,
1991 },
1992 {
1993 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
1994 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1995 .speed = SPEED_100000,
1996 },
1997 {
1998 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
1999 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2000 .speed = SPEED_100000,
2001 },
2002 {
2003 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2004 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2005 .speed = SPEED_100000,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002006 },
2007};
2008
2009#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2010
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002011static void
2012mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2013 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002014{
2015 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2016 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2017 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2018 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2019 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2020 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002021 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002022
2023 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2024 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2025 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2026 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2027 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002028 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002029}
2030
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002031static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002032{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002033 int i;
2034
2035 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2036 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002037 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2038 mode);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002039 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002040}
2041
2042static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002043 struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002044{
2045 u32 speed = SPEED_UNKNOWN;
2046 u8 duplex = DUPLEX_UNKNOWN;
2047 int i;
2048
2049 if (!carrier_ok)
2050 goto out;
2051
2052 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2053 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2054 speed = mlxsw_sp_port_link_mode[i].speed;
2055 duplex = DUPLEX_FULL;
2056 break;
2057 }
2058 }
2059out:
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002060 cmd->base.speed = speed;
2061 cmd->base.duplex = duplex;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002062}
2063
2064static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2065{
2066 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2067 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2068 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2069 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2070 return PORT_FIBRE;
2071
2072 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2073 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2074 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2075 return PORT_DA;
2076
2077 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2078 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2079 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2080 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2081 return PORT_NONE;
2082
2083 return PORT_OTHER;
2084}
2085
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002086static u32
2087mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002088{
2089 u32 ptys_proto = 0;
2090 int i;
2091
2092 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002093 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2094 cmd->link_modes.advertising))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002095 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2096 }
2097 return ptys_proto;
2098}
2099
2100static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2101{
2102 u32 ptys_proto = 0;
2103 int i;
2104
2105 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2106 if (speed == mlxsw_sp_port_link_mode[i].speed)
2107 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2108 }
2109 return ptys_proto;
2110}
2111
Ido Schimmel18f1e702016-02-26 17:32:31 +01002112static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2113{
2114 u32 ptys_proto = 0;
2115 int i;
2116
2117 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2118 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2119 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2120 }
2121 return ptys_proto;
2122}
2123
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002124static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2125 struct ethtool_link_ksettings *cmd)
2126{
2127 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2128 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2129 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2130
2131 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2132 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2133}
2134
2135static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2136 struct ethtool_link_ksettings *cmd)
2137{
2138 if (!autoneg)
2139 return;
2140
2141 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2142 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2143}
2144
2145static void
2146mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2147 struct ethtool_link_ksettings *cmd)
2148{
2149 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2150 return;
2151
2152 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2153 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2154}
2155
/* ethtool .get_link_ksettings: query the PTYS register once and derive
 * supported/advertised/link-partner modes, autoneg state, connector
 * type and current speed/duplex from it.
 */
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	/* Autoneg state is cached by set_link_ksettings(), not read back
	 * from the device.
	 */
	autoneg = mlxsw_sp_port->link.autoneg;
	/* Proto-admin 0 means "query only" - nothing is configured. */
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	/* Speed/duplex are only meaningful with carrier. */
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}
2190
2191static int
2192mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2193 const struct ethtool_link_ksettings *cmd)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002194{
2195 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2196 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2197 char ptys_pl[MLXSW_REG_PTYS_LEN];
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002198 u32 eth_proto_cap, eth_proto_new;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002199 bool autoneg;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002200 int err;
2201
Elad Raz401c8b42016-10-28 21:35:52 +02002202 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002203 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002204 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002205 return err;
Elad Raz401c8b42016-10-28 21:35:52 +02002206 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002207
2208 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2209 eth_proto_new = autoneg ?
2210 mlxsw_sp_to_ptys_advert_link(cmd) :
2211 mlxsw_sp_to_ptys_speed(cmd->base.speed);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002212
2213 eth_proto_new = eth_proto_new & eth_proto_cap;
2214 if (!eth_proto_new) {
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002215 netdev_err(dev, "No supported speed requested\n");
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002216 return -EINVAL;
2217 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002218
Elad Raz401c8b42016-10-28 21:35:52 +02002219 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2220 eth_proto_new);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002221 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002222 if (err)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002223 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002224
Ido Schimmel6277d462016-07-15 11:14:58 +02002225 if (!netif_running(dev))
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002226 return 0;
2227
Ido Schimmel0c83f882016-09-12 13:26:23 +02002228 mlxsw_sp_port->link.autoneg = autoneg;
2229
Ido Schimmelb9d66a32016-09-12 13:26:27 +02002230 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2231 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002232
2233 return 0;
2234}
2235
/* ethtool operations exposed by every Spectrum port netdev. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = mlxsw_sp_port_get_pauseparam,
	.set_pauseparam = mlxsw_sp_port_set_pauseparam,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
};
2248
Ido Schimmel18f1e702016-02-26 17:32:31 +01002249static int
2250mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2251{
2252 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2253 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2254 char ptys_pl[MLXSW_REG_PTYS_LEN];
2255 u32 eth_proto_admin;
2256
2257 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
Elad Raz401c8b42016-10-28 21:35:52 +02002258 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2259 eth_proto_admin);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002260 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2261}
2262
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002263int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2264 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2265 bool dwrr, u8 dwrr_weight)
Ido Schimmel90183b92016-04-06 17:10:08 +02002266{
2267 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2268 char qeec_pl[MLXSW_REG_QEEC_LEN];
2269
2270 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2271 next_index);
2272 mlxsw_reg_qeec_de_set(qeec_pl, true);
2273 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2274 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2275 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2276}
2277
Ido Schimmelcc7cf512016-04-06 17:10:11 +02002278int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2279 enum mlxsw_reg_qeec_hr hr, u8 index,
2280 u8 next_index, u32 maxrate)
Ido Schimmel90183b92016-04-06 17:10:08 +02002281{
2282 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2283 char qeec_pl[MLXSW_REG_QEEC_LEN];
2284
2285 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2286 next_index);
2287 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2288 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2289 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2290}
2291
Ido Schimmel8e8dfe92016-04-06 17:10:10 +02002292int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2293 u8 switch_prio, u8 tclass)
Ido Schimmel90183b92016-04-06 17:10:08 +02002294{
2295 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2296 char qtct_pl[MLXSW_REG_QTCT_LEN];
2297
2298 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2299 tclass);
2300 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2301}
2302
/* Initialize the port's ETS scheduling hierarchy to a sane default:
 * one group, eight subgroups (one per TC), each TC linked to its
 * subgroup; all max shapers disabled; all priorities mapped to TC 0.
 * Each step is a separate register write, so we bail out on the first
 * failure.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarcy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarcies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
2364
/* Create the default PVID (VLAN 1) vPort for a newly created port. */
static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->pvid = 1;

	/* Adding VID 1 implicitly creates the backing vPort. */
	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}
2371
/* Destroy the default PVID (VLAN 1) vPort created at port creation. */
static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}
2376
Jiri Pirko67963a32016-10-28 21:35:55 +02002377static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2378 bool split, u8 module, u8 width, u8 lane)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002379{
2380 struct mlxsw_sp_port *mlxsw_sp_port;
2381 struct net_device *dev;
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002382 size_t bytes;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002383 int err;
2384
2385 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2386 if (!dev)
2387 return -ENOMEM;
Jiri Pirkof20a91f2016-10-27 15:13:00 +02002388 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002389 mlxsw_sp_port = netdev_priv(dev);
2390 mlxsw_sp_port->dev = dev;
2391 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2392 mlxsw_sp_port->local_port = local_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002393 mlxsw_sp_port->split = split;
Ido Schimmeld664b412016-06-09 09:51:40 +02002394 mlxsw_sp_port->mapping.module = module;
2395 mlxsw_sp_port->mapping.width = width;
2396 mlxsw_sp_port->mapping.lane = lane;
Ido Schimmel0c83f882016-09-12 13:26:23 +02002397 mlxsw_sp_port->link.autoneg = 1;
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002398 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
2399 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
2400 if (!mlxsw_sp_port->active_vlans) {
2401 err = -ENOMEM;
2402 goto err_port_active_vlans_alloc;
2403 }
Elad Razfc1273a2016-01-06 13:01:11 +01002404 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
2405 if (!mlxsw_sp_port->untagged_vlans) {
2406 err = -ENOMEM;
2407 goto err_port_untagged_vlans_alloc;
2408 }
Ido Schimmel7f71eb42015-12-15 16:03:37 +01002409 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
Yotam Gigi763b4b72016-07-21 12:03:17 +02002410 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002411
2412 mlxsw_sp_port->pcpu_stats =
2413 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2414 if (!mlxsw_sp_port->pcpu_stats) {
2415 err = -ENOMEM;
2416 goto err_alloc_stats;
2417 }
2418
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002419 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2420 GFP_KERNEL);
2421 if (!mlxsw_sp_port->sample) {
2422 err = -ENOMEM;
2423 goto err_alloc_sample;
2424 }
2425
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002426 mlxsw_sp_port->hw_stats.cache =
2427 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2428
2429 if (!mlxsw_sp_port->hw_stats.cache) {
2430 err = -ENOMEM;
2431 goto err_alloc_hw_stats;
2432 }
2433 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2434 &update_stats_cache);
2435
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002436 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2437 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2438
Ido Schimmel3247ff22016-09-08 08:16:02 +02002439 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2440 if (err) {
2441 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2442 mlxsw_sp_port->local_port);
2443 goto err_port_swid_set;
2444 }
2445
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002446 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2447 if (err) {
2448 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2449 mlxsw_sp_port->local_port);
2450 goto err_dev_addr_init;
2451 }
2452
2453 netif_carrier_off(dev);
2454
2455 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
Yotam Gigi763b4b72016-07-21 12:03:17 +02002456 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2457 dev->hw_features |= NETIF_F_HW_TC;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002458
Jarod Wilsond894be52016-10-20 13:55:16 -04002459 dev->min_mtu = 0;
2460 dev->max_mtu = ETH_MAX_MTU;
2461
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002462 /* Each packet needs to have a Tx header (metadata) on top all other
2463 * headers.
2464 */
Yotam Gigifeb7d382016-10-04 09:46:04 +02002465 dev->needed_headroom = MLXSW_TXHDR_LEN;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002466
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002467 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2468 if (err) {
2469 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2470 mlxsw_sp_port->local_port);
2471 goto err_port_system_port_mapping_set;
2472 }
2473
Ido Schimmel18f1e702016-02-26 17:32:31 +01002474 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2475 if (err) {
2476 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2477 mlxsw_sp_port->local_port);
2478 goto err_port_speed_by_width_set;
2479 }
2480
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002481 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2482 if (err) {
2483 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2484 mlxsw_sp_port->local_port);
2485 goto err_port_mtu_set;
2486 }
2487
2488 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2489 if (err)
2490 goto err_port_admin_status_set;
2491
2492 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2493 if (err) {
2494 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2495 mlxsw_sp_port->local_port);
2496 goto err_port_buffers_init;
2497 }
2498
Ido Schimmel90183b92016-04-06 17:10:08 +02002499 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2500 if (err) {
2501 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2502 mlxsw_sp_port->local_port);
2503 goto err_port_ets_init;
2504 }
2505
Ido Schimmelf00817d2016-04-06 17:10:09 +02002506 /* ETS and buffers must be initialized before DCB. */
2507 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2508 if (err) {
2509 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2510 mlxsw_sp_port->local_port);
2511 goto err_port_dcb_init;
2512 }
2513
Ido Schimmel05978482016-08-17 16:39:30 +02002514 err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
2515 if (err) {
2516 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
2517 mlxsw_sp_port->local_port);
2518 goto err_port_pvid_vport_create;
2519 }
2520
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002521 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
Ido Schimmel2f258442016-08-17 16:39:31 +02002522 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002523 err = register_netdev(dev);
2524 if (err) {
2525 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2526 mlxsw_sp_port->local_port);
2527 goto err_register_netdev;
2528 }
2529
Elad Razd808c7e2016-10-28 21:35:57 +02002530 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2531 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2532 module);
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002533 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002534 return 0;
2535
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002536err_register_netdev:
Ido Schimmel2f258442016-08-17 16:39:31 +02002537 mlxsw_sp->ports[local_port] = NULL;
Ido Schimmel05832722016-08-17 16:39:35 +02002538 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmel05978482016-08-17 16:39:30 +02002539 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2540err_port_pvid_vport_create:
Ido Schimmel4de34eb2016-08-04 17:36:22 +03002541 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
Ido Schimmelf00817d2016-04-06 17:10:09 +02002542err_port_dcb_init:
Ido Schimmel90183b92016-04-06 17:10:08 +02002543err_port_ets_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002544err_port_buffers_init:
2545err_port_admin_status_set:
2546err_port_mtu_set:
Ido Schimmel18f1e702016-02-26 17:32:31 +01002547err_port_speed_by_width_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002548err_port_system_port_mapping_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002549err_dev_addr_init:
Ido Schimmel3247ff22016-09-08 08:16:02 +02002550 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2551err_port_swid_set:
Nogah Frankelfc1bbb02016-09-16 15:05:38 +02002552 kfree(mlxsw_sp_port->hw_stats.cache);
2553err_alloc_hw_stats:
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002554 kfree(mlxsw_sp_port->sample);
2555err_alloc_sample:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002556 free_percpu(mlxsw_sp_port->pcpu_stats);
2557err_alloc_stats:
Elad Razfc1273a2016-01-06 13:01:11 +01002558 kfree(mlxsw_sp_port->untagged_vlans);
2559err_port_untagged_vlans_alloc:
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01002560 kfree(mlxsw_sp_port->active_vlans);
2561err_port_active_vlans_alloc:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002562 free_netdev(dev);
2563 return err;
2564}
2565
Jiri Pirko67963a32016-10-28 21:35:55 +02002566static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2567 bool split, u8 module, u8 width, u8 lane)
2568{
2569 int err;
2570
2571 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2572 if (err) {
2573 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2574 local_port);
2575 return err;
2576 }
Ido Schimmel9a60c902016-12-16 19:29:03 +01002577 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
Jiri Pirko67963a32016-10-28 21:35:55 +02002578 module, width, lane);
2579 if (err)
2580 goto err_port_create;
2581 return 0;
2582
2583err_port_create:
2584 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2585 return err;
2586}
2587
/* Tear down a port created by __mlxsw_sp_port_create(). The steps run
 * in the exact reverse order of the create sequence; ordering matters
 * (e.g. the stats work must be stopped before the netdev goes away).
 */
static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop the periodic HW stats update before unregistering. */
	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	/* Detach the port from its switch partition and module lanes. */
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	kfree(mlxsw_sp_port->hw_stats.cache);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	/* All vPorts should have been destroyed by now. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}
2609
Jiri Pirko67963a32016-10-28 21:35:55 +02002610static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2611{
2612 __mlxsw_sp_port_remove(mlxsw_sp, local_port);
2613 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2614}
2615
Jiri Pirkof83e2102016-10-28 21:35:49 +02002616static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2617{
2618 return mlxsw_sp->ports[local_port] != NULL;
2619}
2620
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002621static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2622{
2623 int i;
2624
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002625 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002626 if (mlxsw_sp_port_created(mlxsw_sp, i))
2627 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002628 kfree(mlxsw_sp->port_to_module);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002629 kfree(mlxsw_sp->ports);
2630}
2631
2632static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2633{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002634 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
Ido Schimmeld664b412016-06-09 09:51:40 +02002635 u8 module, width, lane;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002636 size_t alloc_size;
2637 int i;
2638 int err;
2639
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002640 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002641 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2642 if (!mlxsw_sp->ports)
2643 return -ENOMEM;
2644
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002645 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
2646 if (!mlxsw_sp->port_to_module) {
2647 err = -ENOMEM;
2648 goto err_port_to_module_alloc;
2649 }
2650
2651 for (i = 1; i < max_ports; i++) {
Ido Schimmel558c2d52016-02-26 17:32:29 +01002652 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
Ido Schimmeld664b412016-06-09 09:51:40 +02002653 &width, &lane);
Ido Schimmel558c2d52016-02-26 17:32:29 +01002654 if (err)
2655 goto err_port_module_info_get;
2656 if (!width)
2657 continue;
2658 mlxsw_sp->port_to_module[i] = module;
Jiri Pirko67963a32016-10-28 21:35:55 +02002659 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
2660 module, width, lane);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002661 if (err)
2662 goto err_port_create;
2663 }
2664 return 0;
2665
2666err_port_create:
Ido Schimmel558c2d52016-02-26 17:32:29 +01002667err_port_module_info_get:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002668 for (i--; i >= 1; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002669 if (mlxsw_sp_port_created(mlxsw_sp, i))
2670 mlxsw_sp_port_remove(mlxsw_sp, i);
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002671 kfree(mlxsw_sp->port_to_module);
2672err_port_to_module_alloc:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002673 kfree(mlxsw_sp->ports);
2674 return err;
2675}
2676
Ido Schimmel18f1e702016-02-26 17:32:31 +01002677static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2678{
2679 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2680
2681 return local_port - offset;
2682}
2683
Ido Schimmelbe945352016-06-09 09:51:39 +02002684static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2685 u8 module, unsigned int count)
2686{
2687 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
2688 int err, i;
2689
2690 for (i = 0; i < count; i++) {
2691 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
2692 width, i * width);
2693 if (err)
2694 goto err_port_module_map;
2695 }
2696
2697 for (i = 0; i < count; i++) {
2698 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
2699 if (err)
2700 goto err_port_swid_set;
2701 }
2702
2703 for (i = 0; i < count; i++) {
2704 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
Ido Schimmeld664b412016-06-09 09:51:40 +02002705 module, width, i * width);
Ido Schimmelbe945352016-06-09 09:51:39 +02002706 if (err)
2707 goto err_port_create;
2708 }
2709
2710 return 0;
2711
2712err_port_create:
2713 for (i--; i >= 0; i--)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002714 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2715 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmelbe945352016-06-09 09:51:39 +02002716 i = count;
2717err_port_swid_set:
2718 for (i--; i >= 0; i--)
2719 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
2720 MLXSW_PORT_SWID_DISABLED_PORT);
2721 i = count;
2722err_port_module_map:
2723 for (i--; i >= 0; i--)
2724 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
2725 return err;
2726}
2727
/* Re-create the original (unsplit) ports after a split is undone.
 *
 * Best effort: return values are deliberately ignored since this runs
 * on teardown/rollback paths where there is no way to recover anyway.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	/* Re-created ports sit on every second local port number. */
	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
2758
Jiri Pirkob2f10572016-04-08 19:11:23 +02002759static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2760 unsigned int count)
Ido Schimmel18f1e702016-02-26 17:32:31 +01002761{
Jiri Pirkob2f10572016-04-08 19:11:23 +02002762 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002763 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002764 u8 module, cur_width, base_port;
2765 int i;
2766 int err;
2767
2768 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2769 if (!mlxsw_sp_port) {
2770 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2771 local_port);
2772 return -EINVAL;
2773 }
2774
Ido Schimmeld664b412016-06-09 09:51:40 +02002775 module = mlxsw_sp_port->mapping.module;
2776 cur_width = mlxsw_sp_port->mapping.width;
2777
Ido Schimmel18f1e702016-02-26 17:32:31 +01002778 if (count != 2 && count != 4) {
2779 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2780 return -EINVAL;
2781 }
2782
Ido Schimmel18f1e702016-02-26 17:32:31 +01002783 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2784 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2785 return -EINVAL;
2786 }
2787
2788 /* Make sure we have enough slave (even) ports for the split. */
2789 if (count == 2) {
2790 base_port = local_port;
2791 if (mlxsw_sp->ports[base_port + 1]) {
2792 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2793 return -EINVAL;
2794 }
2795 } else {
2796 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2797 if (mlxsw_sp->ports[base_port + 1] ||
2798 mlxsw_sp->ports[base_port + 3]) {
2799 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2800 return -EINVAL;
2801 }
2802 }
2803
2804 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002805 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2806 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002807
Ido Schimmelbe945352016-06-09 09:51:39 +02002808 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2809 if (err) {
2810 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2811 goto err_port_split_create;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002812 }
2813
2814 return 0;
2815
Ido Schimmelbe945352016-06-09 09:51:39 +02002816err_port_split_create:
2817 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002818 return err;
2819}
2820
Jiri Pirkob2f10572016-04-08 19:11:23 +02002821static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
Ido Schimmel18f1e702016-02-26 17:32:31 +01002822{
Jiri Pirkob2f10572016-04-08 19:11:23 +02002823 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002824 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmeld664b412016-06-09 09:51:40 +02002825 u8 cur_width, base_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002826 unsigned int count;
2827 int i;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002828
2829 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2830 if (!mlxsw_sp_port) {
2831 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2832 local_port);
2833 return -EINVAL;
2834 }
2835
2836 if (!mlxsw_sp_port->split) {
2837 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2838 return -EINVAL;
2839 }
2840
Ido Schimmeld664b412016-06-09 09:51:40 +02002841 cur_width = mlxsw_sp_port->mapping.width;
Ido Schimmel18f1e702016-02-26 17:32:31 +01002842 count = cur_width == 1 ? 4 : 2;
2843
2844 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2845
2846 /* Determine which ports to remove. */
2847 if (count == 2 && local_port >= base_port + 2)
2848 base_port = base_port + 2;
2849
2850 for (i = 0; i < count; i++)
Jiri Pirkof83e2102016-10-28 21:35:49 +02002851 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2852 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002853
Ido Schimmelbe945352016-06-09 09:51:39 +02002854 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
Ido Schimmel18f1e702016-02-26 17:32:31 +01002855
2856 return 0;
2857}
2858
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002859static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2860 char *pude_pl, void *priv)
2861{
2862 struct mlxsw_sp *mlxsw_sp = priv;
2863 struct mlxsw_sp_port *mlxsw_sp_port;
2864 enum mlxsw_reg_pude_oper_status status;
2865 u8 local_port;
2866
2867 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2868 mlxsw_sp_port = mlxsw_sp->ports[local_port];
Ido Schimmelbbf2a472016-07-02 11:00:14 +02002869 if (!mlxsw_sp_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002870 return;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002871
2872 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2873 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2874 netdev_info(mlxsw_sp_port->dev, "link up\n");
2875 netif_carrier_on(mlxsw_sp_port->dev);
2876 } else {
2877 netdev_info(mlxsw_sp_port->dev, "link down\n");
2878 netif_carrier_off(mlxsw_sp_port->dev);
2879 }
2880}
2881
Nogah Frankel14eeda92016-11-25 10:33:32 +01002882static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2883 u8 local_port, void *priv)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002884{
2885 struct mlxsw_sp *mlxsw_sp = priv;
2886 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2887 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2888
2889 if (unlikely(!mlxsw_sp_port)) {
2890 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2891 local_port);
2892 return;
2893 }
2894
2895 skb->dev = mlxsw_sp_port->dev;
2896
2897 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2898 u64_stats_update_begin(&pcpu_stats->syncp);
2899 pcpu_stats->rx_packets++;
2900 pcpu_stats->rx_bytes += skb->len;
2901 u64_stats_update_end(&pcpu_stats->syncp);
2902
2903 skb->protocol = eth_type_trans(skb, skb->dev);
2904 netif_receive_skb(skb);
2905}
2906
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02002907static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2908 void *priv)
2909{
2910 skb->offload_fwd_mark = 1;
Nogah Frankel14eeda92016-11-25 10:33:32 +01002911 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
Ido Schimmel1c6c6d22016-08-25 18:42:40 +02002912}
2913
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002914static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
2915 void *priv)
2916{
2917 struct mlxsw_sp *mlxsw_sp = priv;
2918 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2919 struct psample_group *psample_group;
2920 u32 size;
2921
2922 if (unlikely(!mlxsw_sp_port)) {
2923 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
2924 local_port);
2925 goto out;
2926 }
2927 if (unlikely(!mlxsw_sp_port->sample)) {
2928 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
2929 local_port);
2930 goto out;
2931 }
2932
2933 size = mlxsw_sp_port->sample->truncate ?
2934 mlxsw_sp_port->sample->trunc_size : skb->len;
2935
2936 rcu_read_lock();
2937 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
2938 if (!psample_group)
2939 goto out_unlock;
2940 psample_sample_packet(psample_group, skb, size,
2941 mlxsw_sp_port->dev->ifindex, 0,
2942 mlxsw_sp_port->sample->rate);
2943out_unlock:
2944 rcu_read_unlock();
2945out:
2946 consume_skb(skb);
2947}
2948
Nogah Frankel117b0da2016-11-25 10:33:44 +01002949#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel0fb78a42016-11-25 10:33:39 +01002950 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01002951 _is_ctrl, SP_##_trap_group, DISCARD)
Ido Schimmel93393b32016-08-25 18:42:38 +02002952
Nogah Frankel117b0da2016-11-25 10:33:44 +01002953#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
Nogah Frankel14eeda92016-11-25 10:33:32 +01002954 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
Nogah Frankel117b0da2016-11-25 10:33:44 +01002955 _is_ctrl, SP_##_trap_group, DISCARD)
2956
2957#define MLXSW_SP_EVENTL(_func, _trap_id) \
2958 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
Nogah Frankel14eeda92016-11-25 10:33:32 +01002959
Nogah Frankel45449132016-11-25 10:33:35 +01002960static const struct mlxsw_listener mlxsw_sp_listener[] = {
2961 /* Events */
Nogah Frankel117b0da2016-11-25 10:33:44 +01002962 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
Nogah Frankelee4a60d2016-11-25 10:33:29 +01002963 /* L2 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01002964 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
2965 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
2966 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
2967 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
2968 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
2969 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
2970 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
2971 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
2972 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
2973 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
2974 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
Ido Schimmel93393b32016-08-25 18:42:38 +02002975 /* L3 traps */
Nogah Frankel117b0da2016-11-25 10:33:44 +01002976 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2977 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2978 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2979 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
2980 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
2981 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
2982 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
2983 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
Yotam Gigi98d0f7b2017-01-23 11:07:11 +01002984 /* PKT Sample trap */
2985 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
2986 false, SP_IP2ME, DISCARD)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002987};
2988
Nogah Frankel9148e7c2016-11-25 10:33:47 +01002989static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2990{
2991 char qpcr_pl[MLXSW_REG_QPCR_LEN];
2992 enum mlxsw_reg_qpcr_ir_units ir_units;
2993 int max_cpu_policers;
2994 bool is_bytes;
2995 u8 burst_size;
2996 u32 rate;
2997 int i, err;
2998
2999 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3000 return -EIO;
3001
3002 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3003
3004 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3005 for (i = 0; i < max_cpu_policers; i++) {
3006 is_bytes = false;
3007 switch (i) {
3008 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3009 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3010 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3011 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3012 rate = 128;
3013 burst_size = 7;
3014 break;
3015 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3016 rate = 16 * 1024;
3017 burst_size = 10;
3018 break;
3019 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3020 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3021 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3022 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3023 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3024 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3025 rate = 1024;
3026 burst_size = 7;
3027 break;
3028 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3029 is_bytes = true;
3030 rate = 4 * 1024;
3031 burst_size = 4;
3032 break;
3033 default:
3034 continue;
3035 }
3036
3037 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3038 burst_size);
3039 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3040 if (err)
3041 return err;
3042 }
3043
3044 return 0;
3045}
3046
Nogah Frankel579c82e2016-11-25 10:33:42 +01003047static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003048{
3049 char htgt_pl[MLXSW_REG_HTGT_LEN];
Nogah Frankel117b0da2016-11-25 10:33:44 +01003050 enum mlxsw_reg_htgt_trap_group i;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003051 int max_cpu_policers;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003052 int max_trap_groups;
3053 u8 priority, tc;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003054 u16 policer_id;
Nogah Frankel117b0da2016-11-25 10:33:44 +01003055 int err;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003056
3057 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3058 return -EIO;
3059
3060 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003061 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003062
3063 for (i = 0; i < max_trap_groups; i++) {
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003064 policer_id = i;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003065 switch (i) {
Nogah Frankel117b0da2016-11-25 10:33:44 +01003066 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3067 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3068 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3069 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3070 priority = 5;
3071 tc = 5;
3072 break;
3073 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3074 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3075 priority = 4;
3076 tc = 4;
3077 break;
3078 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3079 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3080 priority = 3;
3081 tc = 3;
3082 break;
3083 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3084 priority = 2;
3085 tc = 2;
3086 break;
3087 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3088 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3089 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3090 priority = 1;
3091 tc = 1;
3092 break;
3093 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
Nogah Frankel579c82e2016-11-25 10:33:42 +01003094 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3095 tc = MLXSW_REG_HTGT_DEFAULT_TC;
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003096 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
Nogah Frankel579c82e2016-11-25 10:33:42 +01003097 break;
3098 default:
3099 continue;
3100 }
Nogah Frankel117b0da2016-11-25 10:33:44 +01003101
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003102 if (max_cpu_policers <= policer_id &&
3103 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3104 return -EIO;
3105
3106 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
Nogah Frankel579c82e2016-11-25 10:33:42 +01003107 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3108 if (err)
3109 return err;
3110 }
3111
3112 return 0;
3113}
3114
3115static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3116{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003117 int i;
3118 int err;
3119
Nogah Frankel9148e7c2016-11-25 10:33:47 +01003120 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3121 if (err)
3122 return err;
3123
Nogah Frankel579c82e2016-11-25 10:33:42 +01003124 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003125 if (err)
3126 return err;
3127
Nogah Frankel45449132016-11-25 10:33:35 +01003128 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003129 err = mlxsw_core_trap_register(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003130 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003131 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003132 if (err)
Nogah Frankel45449132016-11-25 10:33:35 +01003133 goto err_listener_register;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003134
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003135 }
3136 return 0;
3137
Nogah Frankel45449132016-11-25 10:33:35 +01003138err_listener_register:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003139 for (i--; i >= 0; i--) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003140 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003141 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003142 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003143 }
3144 return err;
3145}
3146
3147static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3148{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003149 int i;
3150
Nogah Frankel45449132016-11-25 10:33:35 +01003151 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
Nogah Frankel14eeda92016-11-25 10:33:32 +01003152 mlxsw_core_trap_unregister(mlxsw_sp->core,
Nogah Frankel45449132016-11-25 10:33:35 +01003153 &mlxsw_sp_listener[i],
Nogah Frankel14eeda92016-11-25 10:33:32 +01003154 mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003155 }
3156}
3157
3158static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
3159 enum mlxsw_reg_sfgc_type type,
3160 enum mlxsw_reg_sfgc_bridge_type bridge_type)
3161{
3162 enum mlxsw_flood_table_type table_type;
3163 enum mlxsw_sp_flood_table flood_table;
3164 char sfgc_pl[MLXSW_REG_SFGC_LEN];
3165
Ido Schimmel19ae6122015-12-15 16:03:39 +01003166 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003167 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
Ido Schimmel19ae6122015-12-15 16:03:39 +01003168 else
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003169 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
Ido Schimmel19ae6122015-12-15 16:03:39 +01003170
Nogah Frankel71c365b2017-02-09 14:54:46 +01003171 switch (type) {
3172 case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
Ido Schimmel19ae6122015-12-15 16:03:39 +01003173 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
Nogah Frankel71c365b2017-02-09 14:54:46 +01003174 break;
3175 case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
Nogah Frankel71c365b2017-02-09 14:54:46 +01003176 flood_table = MLXSW_SP_FLOOD_TABLE_MC;
3177 break;
3178 default:
3179 flood_table = MLXSW_SP_FLOOD_TABLE_BC;
3180 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003181
3182 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
3183 flood_table);
3184 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
3185}
3186
3187static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
3188{
3189 int type, err;
3190
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003191 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
3192 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
3193 continue;
3194
3195 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3196 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
3197 if (err)
3198 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003199
3200 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3201 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
3202 if (err)
3203 return err;
3204 }
3205
3206 return 0;
3207}
3208
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003209static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3210{
3211 char slcr_pl[MLXSW_REG_SLCR_LEN];
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003212 int err;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003213
3214 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3215 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3216 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3217 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3218 MLXSW_REG_SLCR_LAG_HASH_SIP |
3219 MLXSW_REG_SLCR_LAG_HASH_DIP |
3220 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3221 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3222 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003223 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3224 if (err)
3225 return err;
3226
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003227 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3228 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003229 return -EIO;
3230
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003231 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
Nogah Frankelce0bd2b2016-09-20 11:16:50 +02003232 sizeof(struct mlxsw_sp_upper),
3233 GFP_KERNEL);
3234 if (!mlxsw_sp->lags)
3235 return -ENOMEM;
3236
3237 return 0;
3238}
3239
/* Release the per-LAG array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
3244
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003245static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3246{
3247 char htgt_pl[MLXSW_REG_HTGT_LEN];
3248
Nogah Frankel579c82e2016-11-25 10:33:42 +01003249 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3250 MLXSW_REG_HTGT_INVALID_POLICER,
3251 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3252 MLXSW_REG_HTGT_DEFAULT_TC);
Nogah Frankel9d87fce2016-11-25 10:33:40 +01003253 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3254}
3255
/* Driver-core init callback: bring up all Spectrum subsystems in
 * dependency order (traps -> flood -> buffers -> LAG -> switchdev ->
 * router -> SPAN -> ACL -> counter pool -> ports). Any failure unwinds
 * every previously initialized step in reverse via the goto ladder.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	/* The base MAC seeds per-port MAC addresses; nothing to undo yet. */
	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		return err;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	/* Ports come last so every subsystem they depend on is ready. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	/* Flood tables have no dedicated fini; both labels fall through to
	 * trap teardown.
	 */
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
	return err;
}
3355
/* Driver-core fini callback: tear down all subsystems in exact reverse
 * order of mlxsw_sp_init(). The trailing WARN_ONs flag vFID/FID objects
 * that were leaked by whoever still held references at teardown.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
}
3372
/* Static device configuration profile passed to mlxsw_core at probe time.
 * "used_*" fields mark which knobs below are actually applied to the
 * device; the others are left at firmware defaults.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_max_pgt = 1,
	.max_pgt = 0,
	/* Flood tables: per-FID-offset and per-FID tables for bridges/vFIDs. */
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_offset_flood_tables = 3,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_VFID_MAX,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	/* KVD (hash/linear) memory split configuration. */
	.used_kvd_split_data = 1,
	.kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts = 2,
	.kvd_hash_double_parts = 1,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	/* Ask firmware for resource limits (MAX_LAG etc.) at init. */
	.resource_query_enable = 1,
};
3404
/* mlxsw core driver ops for the Spectrum ASIC: lifecycle, port split,
 * shared-buffer (devlink sb) accessors and TX header construction.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
3427
Jiri Pirko22a67762017-02-03 10:29:07 +01003428bool mlxsw_sp_port_dev_check(const struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003429{
3430 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3431}
3432
/* netdev_walk_all_lower_dev() callback: when @lower_dev is a Spectrum
 * port, store it through @data and return non-zero to stop the walk.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_port = data;

	if (!mlxsw_sp_port_dev_check(lower_dev))
		return 0;

	*p_port = netdev_priv(lower_dev);
	return 1;
}
3445
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003446static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3447{
Jiri Pirko1182e532017-03-06 21:25:20 +01003448 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003449
3450 if (mlxsw_sp_port_dev_check(dev))
3451 return netdev_priv(dev);
3452
Jiri Pirko1182e532017-03-06 21:25:20 +01003453 mlxsw_sp_port = NULL;
3454 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003455
Jiri Pirko1182e532017-03-06 21:25:20 +01003456 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003457}
3458
Ido Schimmel4724ba562017-03-10 08:53:39 +01003459struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003460{
3461 struct mlxsw_sp_port *mlxsw_sp_port;
3462
3463 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3464 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3465}
3466
3467static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3468{
Jiri Pirko1182e532017-03-06 21:25:20 +01003469 struct mlxsw_sp_port *mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003470
3471 if (mlxsw_sp_port_dev_check(dev))
3472 return netdev_priv(dev);
3473
Jiri Pirko1182e532017-03-06 21:25:20 +01003474 mlxsw_sp_port = NULL;
3475 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3476 &mlxsw_sp_port);
David Aherndd823642016-10-17 19:15:49 -07003477
Jiri Pirko1182e532017-03-06 21:25:20 +01003478 return mlxsw_sp_port;
Jiri Pirko7ce856a2016-07-04 08:23:12 +02003479}
3480
3481struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3482{
3483 struct mlxsw_sp_port *mlxsw_sp_port;
3484
3485 rcu_read_lock();
3486 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3487 if (mlxsw_sp_port)
3488 dev_hold(mlxsw_sp_port->dev);
3489 rcu_read_unlock();
3490 return mlxsw_sp_port;
3491}
3492
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
3497
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003498static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3499 u16 fid)
3500{
3501 if (mlxsw_sp_fid_is_vfid(fid))
3502 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3503 else
3504 return test_bit(fid, lag_port->active_vlans);
3505}
3506
3507static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3508 u16 fid)
Ido Schimmel039c49a2016-01-27 15:20:18 +01003509{
3510 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003511 u8 local_port = mlxsw_sp_port->local_port;
3512 u16 lag_id = mlxsw_sp_port->lag_id;
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003513 u64 max_lag_members;
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003514 int i, count = 0;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003515
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003516 if (!mlxsw_sp_port->lagged)
3517 return true;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003518
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003519 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3520 MAX_LAG_MEMBERS);
3521 for (i = 0; i < max_lag_members; i++) {
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003522 struct mlxsw_sp_port *lag_port;
3523
3524 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3525 if (!lag_port || lag_port->local_port == local_port)
3526 continue;
3527 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
3528 count++;
3529 }
3530
3531 return !count;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003532}
3533
3534static int
3535mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3536 u16 fid)
3537{
3538 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3539 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3540
3541 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
3542 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3543 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
3544 mlxsw_sp_port->local_port);
3545
Ido Schimmel22305372016-06-20 23:04:21 +02003546 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
3547 mlxsw_sp_port->local_port, fid);
3548
Ido Schimmel039c49a2016-01-27 15:20:18 +01003549 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3550}
3551
3552static int
Ido Schimmel039c49a2016-01-27 15:20:18 +01003553mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3554 u16 fid)
3555{
3556 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3557 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3558
3559 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
3560 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3561 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
3562
Ido Schimmel22305372016-06-20 23:04:21 +02003563 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
3564 mlxsw_sp_port->lag_id, fid);
3565
Ido Schimmel039c49a2016-01-27 15:20:18 +01003566 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3567}
3568
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003569int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
Ido Schimmel039c49a2016-01-27 15:20:18 +01003570{
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003571 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3572 return 0;
Ido Schimmel039c49a2016-01-27 15:20:18 +01003573
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003574 if (mlxsw_sp_port->lagged)
3575 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
Ido Schimmel039c49a2016-01-27 15:20:18 +01003576 fid);
3577 else
Ido Schimmelfe3f6d12016-06-20 23:04:19 +02003578 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
Ido Schimmel039c49a2016-01-27 15:20:18 +01003579}
3580
Ido Schimmel701b1862016-07-04 08:23:16 +02003581static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
3582{
3583 struct mlxsw_sp_fid *f, *tmp;
3584
3585 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
3586 if (--f->ref_count == 0)
3587 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3588 else
3589 WARN_ON_ONCE(1);
3590}
3591
Ido Schimmel7117a572016-06-20 23:04:06 +02003592static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3593 struct net_device *br_dev)
3594{
3595 return !mlxsw_sp->master_bridge.dev ||
3596 mlxsw_sp->master_bridge.dev == br_dev;
3597}
3598
/* Record @br_dev as the device's single master bridge and take a
 * reference for the joining port.
 */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
3605
/* Drop one reference on the master bridge; when the last port leaves,
 * forget the bridge device and release the FIDs it may still pin.
 */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}
3618
/* Enslave @mlxsw_sp_port to bridge @br_dev: drop the implicit VLAN 1
 * interface, account the master bridge reference and switch the port's
 * bridging-related flags on.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	/* Bridged state: learn (and sync to bridge FDB) and flood unknown
	 * unicast/multicast; multicast starts disabled with no mrouter.
	 */
	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->mc_flood = 1;
	mlxsw_sp_port->mc_router = 0;
	mlxsw_sp_port->mc_disabled = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
3646
/* Release @mlxsw_sp_port from its bridge: restore PVID 1, drop the
 * master bridge reference, clear the bridging flags and re-create the
 * implicit VLAN 1 interface removed on join.
 */
static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->mc_flood = 0;
	mlxsw_sp_port->mc_router = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
3667
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003668static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003669{
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003670 char sldr_pl[MLXSW_REG_SLDR_LEN];
3671
3672 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3673 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3674}
3675
3676static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3677{
3678 char sldr_pl[MLXSW_REG_SLDR_LEN];
3679
3680 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3681 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3682}
3683
3684static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3685 u16 lag_id, u8 port_index)
3686{
3687 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3688 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3689
3690 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3691 lag_id, port_index);
3692 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3693}
3694
3695static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3696 u16 lag_id)
3697{
3698 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3699 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3700
3701 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3702 lag_id);
3703 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3704}
3705
3706static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3707 u16 lag_id)
3708{
3709 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3710 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3711
3712 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3713 lag_id);
3714 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3715}
3716
3717static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3718 u16 lag_id)
3719{
3720 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3721 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3722
3723 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3724 lag_id);
3725 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3726}
3727
3728static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3729 struct net_device *lag_dev,
3730 u16 *p_lag_id)
3731{
3732 struct mlxsw_sp_upper *lag;
3733 int free_lag_id = -1;
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003734 u64 max_lag;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003735 int i;
3736
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003737 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3738 for (i = 0; i < max_lag; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003739 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3740 if (lag->ref_count) {
3741 if (lag->dev == lag_dev) {
3742 *p_lag_id = i;
3743 return 0;
3744 }
3745 } else if (free_lag_id < 0) {
3746 free_lag_id = i;
3747 }
3748 }
3749 if (free_lag_id < 0)
3750 return -EBUSY;
3751 *p_lag_id = free_lag_id;
3752 return 0;
3753}
3754
3755static bool
3756mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3757 struct net_device *lag_dev,
3758 struct netdev_lag_upper_info *lag_upper_info)
3759{
3760 u16 lag_id;
3761
3762 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3763 return false;
3764 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3765 return false;
3766 return true;
3767}
3768
3769static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3770 u16 lag_id, u8 *p_port_index)
3771{
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003772 u64 max_lag_members;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003773 int i;
3774
Jiri Pirkoc1a38312016-10-21 16:07:23 +02003775 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3776 MAX_LAG_MEMBERS);
3777 for (i = 0; i < max_lag_members; i++) {
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003778 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3779 *p_port_index = i;
3780 return 0;
3781 }
3782 }
3783 return -EBUSY;
3784}
3785
/* Migrate the port's PVID vPort (VID 1) into the LAG: detach it from any
 * FID it is bound to and re-parent it onto the LAG device.
 */
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev, u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
	mlxsw_sp_vport->dev = lag_dev;
}
3808
/* Undo mlxsw_sp_port_pvid_vport_lag_join(): detach the PVID vPort from
 * its FID and re-parent it back onto the physical port device.
 */
static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* The FID's RIF is bound to the LAG; leave it before re-parenting. */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->lagged = 0;
}
3826
/* Enslave @mlxsw_sp_port to LAG device @lag_dev: allocate/create the
 * device LAG on first use, map the port into a free collector slot,
 * enable collection, record the mapping in core and migrate the PVID
 * vPort. Errors unwind the partially applied steps.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member: create the LAG in hardware. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	/* Destroy the LAG only if we created it above (still no members). */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
3874
/* Release @mlxsw_sp_port from LAG @lag_dev: disable collection, unmap
 * the port, leave any bridge joined through the LAG, destroy the LAG in
 * hardware when this was the last member, and restore the PVID vPort.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Leaving the LAG implies leaving the bridge it was enslaved to. */
	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
3905
Jiri Pirko74581202015-12-03 12:12:30 +01003906static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3907 u16 lag_id)
3908{
3909 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3910 char sldr_pl[MLXSW_REG_SLDR_LEN];
3911
3912 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3913 mlxsw_sp_port->local_port);
3914 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3915}
3916
3917static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3918 u16 lag_id)
3919{
3920 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3921 char sldr_pl[MLXSW_REG_SLDR_LEN];
3922
3923 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3924 mlxsw_sp_port->local_port);
3925 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3926}
3927
3928static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3929 bool lag_tx_enabled)
3930{
3931 if (lag_tx_enabled)
3932 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3933 mlxsw_sp_port->lag_id);
3934 else
3935 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3936 mlxsw_sp_port->lag_id);
3937}
3938
/* CHANGELOWERSTATE handler for a LAG member: apply the new TX-enabled
 * state reported by the LAG driver.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
3944
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003945static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3946 struct net_device *vlan_dev)
3947{
3948 struct mlxsw_sp_port *mlxsw_sp_vport;
3949 u16 vid = vlan_dev_vlan_id(vlan_dev);
3950
3951 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
Ido Schimmel423b9372016-06-20 23:04:03 +02003952 if (WARN_ON(!mlxsw_sp_vport))
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003953 return -EINVAL;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003954
3955 mlxsw_sp_vport->dev = vlan_dev;
3956
3957 return 0;
3958}
3959
Ido Schimmel82e6db02016-06-20 23:04:04 +02003960static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3961 struct net_device *vlan_dev)
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003962{
3963 struct mlxsw_sp_port *mlxsw_sp_vport;
3964 u16 vid = vlan_dev_vlan_id(vlan_dev);
3965
3966 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
Ido Schimmel423b9372016-06-20 23:04:03 +02003967 if (WARN_ON(!mlxsw_sp_vport))
Ido Schimmel82e6db02016-06-20 23:04:04 +02003968 return;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003969
3970 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01003971}
3972
/* Handle [PRE]CHANGEUPPER netdev notifications for a Spectrum port.
 * PRECHANGEUPPER validates that the requested topology is offloadable
 * and may veto it; CHANGEUPPER performs the actual join/leave for VLAN,
 * bridge, LAG and L3-master (VRF) uppers.
 *
 * Returns 0 or a negative errno (vetoes the change on PRECHANGEUPPER).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge and L3-master uppers are supported. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_l3_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		/* Only hash-TX LAGs with a free index can be offloaded. */
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port that already carries VLAN uppers cannot be
		 * enslaved to a LAG.
		 */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		/* VLAN uppers are not allowed directly on LAG member ports;
		 * they must be stacked on the LAG device itself.
		 */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_l3_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vrf_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_vrf_leave(mlxsw_sp_port);
		} else {
			/* PRECHANGEUPPER should have vetoed anything else. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4046
Jiri Pirko74581202015-12-03 12:12:30 +01004047static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4048 unsigned long event, void *ptr)
4049{
4050 struct netdev_notifier_changelowerstate_info *info;
4051 struct mlxsw_sp_port *mlxsw_sp_port;
4052 int err;
4053
4054 mlxsw_sp_port = netdev_priv(dev);
4055 info = ptr;
4056
4057 switch (event) {
4058 case NETDEV_CHANGELOWERSTATE:
4059 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4060 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4061 info->lower_state_info);
4062 if (err)
4063 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4064 }
4065 break;
4066 }
4067
Ido Schimmel80bedf12016-06-20 23:03:59 +02004068 return 0;
Jiri Pirko74581202015-12-03 12:12:30 +01004069}
4070
4071static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
4072 unsigned long event, void *ptr)
4073{
4074 switch (event) {
4075 case NETDEV_PRECHANGEUPPER:
4076 case NETDEV_CHANGEUPPER:
4077 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
4078 case NETDEV_CHANGELOWERSTATE:
4079 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
4080 }
4081
Ido Schimmel80bedf12016-06-20 23:03:59 +02004082 return 0;
Jiri Pirko74581202015-12-03 12:12:30 +01004083}
4084
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004085static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4086 unsigned long event, void *ptr)
4087{
4088 struct net_device *dev;
4089 struct list_head *iter;
4090 int ret;
4091
4092 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4093 if (mlxsw_sp_port_dev_check(dev)) {
4094 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004095 if (ret)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004096 return ret;
4097 }
4098 }
4099
Ido Schimmel80bedf12016-06-20 23:03:59 +02004100 return 0;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01004101}
4102
Ido Schimmel701b1862016-07-04 08:23:16 +02004103static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
4104 struct net_device *vlan_dev)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004105{
Ido Schimmel701b1862016-07-04 08:23:16 +02004106 u16 fid = vlan_dev_vlan_id(vlan_dev);
Ido Schimmeld0ec8752016-06-20 23:04:12 +02004107 struct mlxsw_sp_fid *f;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004108
Ido Schimmel701b1862016-07-04 08:23:16 +02004109 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4110 if (!f) {
4111 f = mlxsw_sp_fid_create(mlxsw_sp, fid);
4112 if (IS_ERR(f))
4113 return PTR_ERR(f);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004114 }
4115
Ido Schimmel701b1862016-07-04 08:23:16 +02004116 f->ref_count++;
4117
4118 return 0;
4119}
4120
4121static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
4122 struct net_device *vlan_dev)
4123{
4124 u16 fid = vlan_dev_vlan_id(vlan_dev);
4125 struct mlxsw_sp_fid *f;
4126
4127 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004128 if (f && f->rif)
4129 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel701b1862016-07-04 08:23:16 +02004130 if (f && --f->ref_count == 0)
4131 mlxsw_sp_fid_destroy(mlxsw_sp, f);
4132}
4133
/* Handle upper device events for a bridge netdev. Only VLAN uppers on
 * top of the VLAN-aware master bridge and L3 master (VRF) uppers are
 * supported; anything else is vetoed at PRECHANGEUPPER time.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	/* Ignore bridges with no mlxsw port beneath them. */
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_l3_master(upper_dev))
			return -EINVAL;
		/* VLAN uppers are only allowed on the master bridge. */
		if (is_vlan_dev(upper_dev) &&
		    br_dev != mlxsw_sp->master_bridge.dev)
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
								       upper_dev);
			else
				mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
								   upper_dev);
		} else if (netif_is_l3_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
							       br_dev);
			else
				mlxsw_sp_bridge_vrf_leave(mlxsw_sp, br_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed anything else. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4181
/* Return the index of the first unused vFID, or MLXSW_SP_VFID_MAX when
 * all vFIDs are already mapped.
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}
4187
4188static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
4189{
4190 char sfmr_pl[MLXSW_REG_SFMR_LEN];
4191
4192 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
4193 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004194}
4195
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004196static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
Ido Schimmel1c800752016-06-20 23:04:20 +02004197
/* Allocate a free vFID, instantiate the corresponding FID in the device
 * and track it on the driver's vFID list. Returns the new FID struct or
 * an ERR_PTR on failure (no free vFIDs, register write error, or OOM).
 */
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	/* vPorts bound to this vFID detach through this callback. */
	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	/* Roll back the device-side FID creation on allocation failure. */
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
4236
/* Release a vFID: unmap it from the driver's tracking structures,
 * destroy any router interface on it, free the struct and finally
 * destroy the FID in the device.
 */
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;	/* saved: 'f' is freed before the SFMR write */

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
4253
Ido Schimmel99724c12016-07-04 08:23:14 +02004254static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
4255 bool valid)
4256{
4257 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
4258 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4259
4260 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
4261 vid);
4262}
4263
/* Attach a vPort to the vFID associated with the given bridge device,
 * creating the vFID on first use. Enables flooding and maps the vPort's
 * {port, VID} to the FID, then takes a reference. On failure every step
 * is unwound; a freshly-created vFID (ref_count still zero) is destroyed.
 */
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	/* Only destroy a vFID we created here and nobody else joined. */
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
4299
/* Detach a vPort from its vFID: unmap the {port, VID} from the FID,
 * disable flooding, flush learned FDB entries and drop the reference,
 * destroying the vFID when it was the last user.
 */
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
4316
/* Enslave a vPort to a VLAN-unaware bridge: leave any FID it is
 * currently attached to, join the bridge's vFID, enable learning and
 * record the bridge-related flags on the vPort.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	/* Detach from the current FID, if any, before joining the new one. */
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->mc_flood = 1;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->mc_disabled = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}
4354
/* Undo mlxsw_sp_vport_bridge_join(): disable learning, leave the vFID
 * and clear the bridge-related flags on the vPort.
 */
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->mc_flood = 0;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->bridged = 0;
}
4370
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004371static bool
4372mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
4373 const struct net_device *br_dev)
4374{
4375 struct mlxsw_sp_port *mlxsw_sp_vport;
4376
4377 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
4378 vport.list) {
Ido Schimmel3ba2ebf2016-07-04 08:23:15 +02004379 struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
Ido Schimmel56918b62016-06-20 23:04:18 +02004380
4381 if (dev && dev == br_dev)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004382 return false;
4383 }
4384
4385 return true;
4386}
4387
/* Handle upper device events for a VLAN upper of a physical port (a
 * vPort identified by 'vid'). Only bridge and L3 master (VRF) uppers
 * are supported.
 */
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	/* No vPort for this VID means nothing to offload. */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_l3_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
								 upper_dev);
			else
				mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		} else if (netif_is_l3_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
			else
				mlxsw_sp_vport_vrf_leave(mlxsw_sp_vport);
		} else {
			/* PRECHANGEUPPER should have vetoed anything else. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4440
Ido Schimmel272c4472015-12-15 16:03:47 +01004441static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
4442 unsigned long event, void *ptr,
4443 u16 vid)
4444{
4445 struct net_device *dev;
4446 struct list_head *iter;
4447 int ret;
4448
4449 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4450 if (mlxsw_sp_port_dev_check(dev)) {
4451 ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
4452 vid);
Ido Schimmel80bedf12016-06-20 23:03:59 +02004453 if (ret)
Ido Schimmel272c4472015-12-15 16:03:47 +01004454 return ret;
4455 }
4456 }
4457
Ido Schimmel80bedf12016-06-20 23:03:59 +02004458 return 0;
Ido Schimmel272c4472015-12-15 16:03:47 +01004459}
4460
/* Handle upper device events for a VLAN device on top of a bridge.
 * Only the VLAN-aware master bridge may carry such VLAN devices, and
 * their only supported upper is an L3 master (VRF).
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	/* Ignore devices with no mlxsw port beneath them. */
	mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	if (!mlxsw_sp)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		/* VLAN devices are only allowed on top of the
		 * VLAN-aware bridge.
		 */
		if (WARN_ON(vlan_dev_real_dev(vlan_dev) !=
			    mlxsw_sp->master_bridge.dev))
			return -EINVAL;
		if (!netif_is_l3_master(info->upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		if (netif_is_l3_master(info->upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
							       vlan_dev);
			else
				mlxsw_sp_bridge_vrf_leave(mlxsw_sp, vlan_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed anything else. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4501
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004502static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4503 unsigned long event, void *ptr)
4504{
4505 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4506 u16 vid = vlan_dev_vlan_id(vlan_dev);
4507
Ido Schimmel272c4472015-12-15 16:03:47 +01004508 if (mlxsw_sp_port_dev_check(real_dev))
4509 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
4510 vid);
4511 else if (netif_is_lag_master(real_dev))
4512 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
4513 vid);
Ido Schimmel3d70e4582017-03-16 09:08:19 +01004514 else if (netif_is_bridge_master(real_dev))
4515 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, event,
4516 ptr);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004517
Ido Schimmel80bedf12016-06-20 23:03:59 +02004518 return 0;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01004519}
4520
/* Top-level netdevice notifier callback: route the event to the handler
 * matching the device type, then translate the errno for the notifier
 * chain. Address/MTU changes go to the router regardless of type.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
4540
/* Reflects netdev topology changes (bridge/LAG/VLAN/VRF) to the device. */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
4544
/* Tracks IPv4 address changes for router interface management. */
static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};
4549
/* Forwards neighbour/route netevents to the driver's router code. */
static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};
4553
/* PCI IDs of devices handled by this driver (Spectrum ASIC). */
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};
4558
/* PCI driver glue; probe/remove are supplied by the mlxsw PCI core. */
static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
4563
/* Module entry point: register the notifier blocks, then the mlxsw core
 * driver and the PCI driver. On failure, unwind in exact reverse order.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}
4590
/* Module exit: tear everything down in reverse order of registration. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
4599
4600module_init(mlxsw_sp_module_init);
4601module_exit(mlxsw_sp_module_exit);
4602
4603MODULE_LICENSE("Dual BSD/GPL");
4604MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4605MODULE_DESCRIPTION("Mellanox Spectrum driver");
Jiri Pirko1d20d232016-10-27 15:12:59 +02004606MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);