/*
 * drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 * Gianfar Ethernet Driver
 * Ethtool support for Gianfar Enet
 * Based on e1000 ethtool support
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 *
 * This software may be used and distributed according to
 * the terms of the GNU Public License, Version 2, incorporated herein
 * by reference.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>

#include "gianfar.h"

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo);

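/* Statistic names exported via "ethtool -S": the software (extra) counters
 * come first, followed by the RMON hardware counters when the device has an
 * RMON block.
 */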
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-crc-errors",
	"rx-overrun-errors",
	"rx-busy-errors",
	"rx-babbling-errors",
	"rx-truncated-frames",
	"ethernet-bus-error",
	"tx-babbling-errors",
	"tx-underrun-errors",
	"rx-skb-missing-errors",
	"tx-timeout-errors",
	"tx-rx-64-frames",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"rx-bytes",
	"rx-packets",
	"rx-fcs-errors",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-unknown-op-code",
	"rx-alignment-error",
	"rx-frame-length-error",
	"rx-code-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"rx-jabber-frames",
	"rx-dropped-frames",
	"tx-byte-counter",
	"tx-packets",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"reserved",
	"tx-dropped-frames",
	"tx-jabber-frames",
	"tx-fcs-errors",
	"tx-control-frames",
	"tx-oversize-frames",
	"tx-undersize-frames",
	"tx-fragmented-frames",
};

/* Fill in a buffer with the strings which correspond to the
 * stats */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
		memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
	else
		memcpy(buf, stat_gstrings,
		       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}

/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	atomic64_t *extra = (atomic64_t *)&priv->extra_stats;

	for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
		buf[i] = atomic64_read(&extra[i]);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;

		for (; i < GFAR_STATS_LEN; i++, rmon++)
			buf[i] = (u64) gfar_read(rmon);
	}
}

static int gfar_sset_count(struct net_device *dev, int sset)
{
	struct gfar_private *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
			return GFAR_STATS_LEN;
		else
			return GFAR_EXTRA_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, gfar_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

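/* Hand new link settings off to the PHY layer */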
static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (NULL == phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

/* Return the current settings in the ethtool_cmd structure */
static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;

	if (NULL == phydev)
		return -ENODEV;
	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	/* etsec-1.7 and older versions have only one set of txic
	 * and rxic regs although they support multiple queues */
	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);

	return phy_ethtool_gset(phydev, cmd);
}

/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
	return sizeof (struct gfar);
}

/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *regbuf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
	u32 *buf = (u32 *) regbuf;

	for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
		buf[i] = gfar_read(&theregs[i]);
}

/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
				     unsigned int usecs)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if usecs > 0 */
	return (usecs * 1000 + count - 1) / count;
}

/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
				     unsigned int ticks)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0 */
	/* if ticks is > 0 */
	return (ticks * count) / 1000;
}

/* Get the coalescing parameters, and put them in the cvals
 * structure. */
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;
	unsigned long rxtime;
	unsigned long rxcount;
	unsigned long txtime;
	unsigned long txcount;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (NULL == priv->phydev)
		return -ENODEV;

	rx_queue = priv->rx_queue[0];
	tx_queue = priv->tx_queue[0];

	rxtime = get_ictt_value(rx_queue->rxic);
	rxcount = get_icft_value(rx_queue->rxic);
	txtime = get_ictt_value(tx_queue->txic);
	txcount = get_icft_value(tx_queue->txic);
	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
	cvals->rx_max_coalesced_frames = rxcount;

	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
	cvals->tx_max_coalesced_frames = txcount;

	cvals->use_adaptive_rx_coalesce = 0;
	cvals->use_adaptive_tx_coalesce = 0;

	cvals->pkt_rate_low = 0;
	cvals->rx_coalesce_usecs_low = 0;
	cvals->rx_max_coalesced_frames_low = 0;
	cvals->tx_coalesce_usecs_low = 0;
	cvals->tx_max_coalesced_frames_low = 0;

	/* When the packet rate is below pkt_rate_high but above
	 * pkt_rate_low (both measured in packets per second) the
	 * normal {rx,tx}_* coalescing parameters are used.
	 */

	/* When the packet rate (measured in packets per second)
	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
	 * used.
	 */
	cvals->pkt_rate_high = 0;
	cvals->rx_coalesce_usecs_high = 0;
	cvals->rx_max_coalesced_frames_high = 0;
	cvals->tx_coalesce_usecs_high = 0;
	cvals->tx_max_coalesced_frames_high = 0;

	/* How often to do adaptive coalescing packet rate sampling,
	 * measured in seconds.  Must not be zero.
	 */
	cvals->rate_sample_interval = 0;

	return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i, err = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (NULL == priv->phydev)
		return -ENODEV;

	/* Check the bounds of the values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	/* Check the bounds of the values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	/* Set up rx coalescing */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}

/* Fills in rvals with the current ring parameters.  Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

	/* Values changeable by the user.  The valid values are
	 * in the range 1 to the "*_max_pending" counterpart above.
	 */
	rvals->rx_pending = rx_queue->rx_ring_size;
	rvals->rx_mini_pending = rx_queue->rx_ring_size;
	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
	rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in motion.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	/* Change the sizes */
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP)
		err = startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}

static void gfar_gpauseparam(struct net_device *dev,
			     struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);

	epause->autoneg = !!priv->pause_aneg_en;
	epause->rx_pause = !!priv->rx_pause_en;
	epause->tx_pause = !!priv->tx_pause_en;
}

static int gfar_spauseparam(struct net_device *dev,
			    struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 oldadv, newadv;

	if (!phydev)
		return -ENODEV;

	if (!(phydev->supported & SUPPORTED_Pause) ||
	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
	     (epause->rx_pause != epause->tx_pause)))
		return -EINVAL;

	priv->rx_pause_en = priv->tx_pause_en = 0;
	if (epause->rx_pause) {
		priv->rx_pause_en = 1;

		if (epause->tx_pause) {
			priv->tx_pause_en = 1;
			/* FLOW_CTRL_RX & TX */
			newadv = ADVERTISED_Pause;
		} else /* FLOW_CTRL_RX */
			newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	} else if (epause->tx_pause) {
		priv->tx_pause_en = 1;
		/* FLOW_CTRL_TX */
		newadv = ADVERTISED_Asym_Pause;
	} else
		newadv = 0;

	if (epause->autoneg)
		priv->pause_aneg_en = 1;
	else
		priv->pause_aneg_en = 0;

	oldadv = phydev->advertising &
		(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	if (oldadv != newadv) {
		phydev->advertising &=
			~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		phydev->advertising |= newadv;
		if (phydev->autoneg)
			/* inform link partner of our
			 * new flow ctrl settings
			 */
			return phy_start_aneg(phydev);

		if (!epause->autoneg) {
			u32 tempval;
			tempval = gfar_read(&regs->maccfg1);
			tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);

			priv->tx_actual_en = 0;
			if (priv->tx_pause_en) {
				priv->tx_actual_en = 1;
				tempval |= MACCFG1_TX_FLOW;
			}

			if (priv->rx_pause_en)
				tempval |= MACCFG1_RX_FLOW;
			gfar_write(&regs->maccfg1, tempval);
		}
	}

	return 0;
}

int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_RXCSUM)))
		return 0;

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	dev->features = features;

	if (dev->flags & IFF_UP) {
		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}

static uint32_t gfar_get_msglevel(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->msg_enable = data;
}

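/* Wake-on-LAN support: only magic-packet wake is handled here, and only when
 * the device advertises FSL_GIANFAR_DEV_HAS_MAGIC_PACKET.
 */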
#ifdef CONFIG_PM
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}

static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    wol->wolopts != 0)
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

	priv->wol_en = !!device_may_wakeup(&dev->dev);

	return 0;
}
#endif

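/* Write hash filer rules for each RXH_* field enabled in @ethflow, working
 * downwards from priv->cur_filer_idx.
 */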
static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
{
	u32 fcr = 0x0, fpr = FPR_FILER_MASK;

	if (ethflow & RXH_L2DA) {
		fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;

		fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_VLAN) {
		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_IP_SRC) {
		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & (RXH_IP_DST)) {
		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L3_PROTO) {
		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_0_1) {
		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_2_3) {
		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}
}

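/* Locate the parse rule for @class in the filer table, then insert the hash
 * rules for @ethflow behind it while preserving the unrelated rules that
 * follow.
 */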
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
				       u64 class)
{
	unsigned int last_rule_idx = priv->cur_filer_idx;
	unsigned int cmp_rqfpr;
	unsigned int *local_rqfpr;
	unsigned int *local_rqfcr;
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;
	int ret = 1;

	local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
		break;
	default:
		netdev_err(priv->ndev,
			   "Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] ==
		     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
		    (priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		netdev_err(priv->ndev,
			   "No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* If a match was found, it marks the start of a cluster rule;
	 * if it was already programmed, we need to overwrite these rules
	 */
	for (l = i+1; l < MAX_FILER_IDX; l++) {
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
					     RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
					 priv->ftp_rqfpr[l]);
			break;
		}

		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    (priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;
	last_rule_idx = l;

	/* hash rules */
	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j+1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				 local_rqfcr[k], local_rqfpr[k]);
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}

static int gfar_set_hash_opts(struct gfar_private *priv,
			      struct ethtool_rxnfc *cmd)
{
	/* write the filer rules here */
	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
		return -EINVAL;

	return 0;
}

static int gfar_check_filer_hardware(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 i;

	/* Check if we are in FIFO mode */
	i = gfar_read(&regs->ecntrl);
	i &= ECNTRL_FIFM;
	if (i == ECNTRL_FIFM) {
		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}
	/* Or in standard mode */
	else {
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK;
		if (i == RCTRL_PRSDEP_MASK) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}

	/* Sets the properties for the arbitrary filer rule
	 * to the first 4 Layer 4 Bytes
	 */
	gfar_write(&regs->rbifx, 0xC0C1C2C3);
	return 0;
}

static int gfar_comp_asc(const void *a, const void *b)
{
	return memcmp(a, b, 4);
}

static int gfar_comp_desc(const void *a, const void *b)
{
	return -memcmp(a, b, 4);
}

static void gfar_swap(void *a, void *b, int size)
{
	u32 *_a = a;
	u32 *_b = b;

	swap(_a[0], _b[0]);
	swap(_a[1], _b[1]);
	swap(_a[2], _b[2]);
	swap(_a[3], _b[3]);
}

/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	tab->fe[tab->index].prop = mask;
	tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
				   RQFCR_AND;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

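/* Append a mask entry followed by a value entry for the given RQFCR PID flag
 * to the filer cache.
 */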
static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
				       struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

/* For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple
 * For a don't-care mask it gives us a 0
 *
 * The don't-care check and the mask adjustment if mask=0 are done for VLAN
 * and MAC stuff on an upper level (due to missing information on this level).
 * For these guys we can discard them if they are value=0 and mask=0.
 *
 * Further, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
			       struct filer_table *tab)
{
	switch (flag) {
	/* 3bit */
	case RQFCR_PID_PRI:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_PRI_MASK;
		break;
	/* 8bit */
	case RQFCR_PID_L4P:
	case RQFCR_PID_TOS:
		if (!~(mask | RQFCR_PID_L4P_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_L4P_MASK;
		break;
	/* 12bit */
	case RQFCR_PID_VID:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_VID_MASK;
		break;
	/* 16bit */
	case RQFCR_PID_DPT:
	case RQFCR_PID_SPT:
	case RQFCR_PID_ETY:
		if (!~(mask | RQFCR_PID_PORT_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_PORT_MASK;
		break;
	/* 24bit */
	case RQFCR_PID_DAH:
	case RQFCR_PID_DAL:
	case RQFCR_PID_SAH:
	case RQFCR_PID_SAL:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_MAC_MASK;
		break;
	/* for all real 32bit masks */
	default:
		if (!~mask)
			return;
		if (!mask)
			mask = ~0;
		break;
	}
	gfar_set_general_attribute(value, mask, flag, tab);
}

/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
			      struct ethtool_tcpip4_spec *mask,
			      struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(be16_to_cpu(value->pdst),
			   be16_to_cpu(mask->pdst),
			   RQFCR_PID_DPT, tab);
	gfar_set_attribute(be16_to_cpu(value->psrc),
			   be16_to_cpu(mask->psrc),
			   RQFCR_PID_SPT, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
			     struct ethtool_usrip4_spec *mask,
			     struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
	gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
			   be32_to_cpu(mask->l4_4_bytes),
			   RQFCR_PID_ARB, tab);
}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
			   struct filer_table *tab)
{
	u32 upper_temp_mask = 0;
	u32 lower_temp_mask = 0;

	/* Source address */
	if (!is_broadcast_ether_addr(mask->h_source)) {
		if (is_zero_ether_addr(mask->h_source)) {
			upper_temp_mask = 0xFFFFFFFF;
			lower_temp_mask = 0xFFFFFFFF;
		} else {
			upper_temp_mask = mask->h_source[0] << 16 |
					  mask->h_source[1] << 8 |
					  mask->h_source[2];
			lower_temp_mask = mask->h_source[3] << 16 |
					  mask->h_source[4] << 8 |
					  mask->h_source[5];
		}
		/* Upper 24bit */
		gfar_set_attribute(value->h_source[0] << 16 |
				   value->h_source[1] << 8 |
				   value->h_source[2],
				   upper_temp_mask, RQFCR_PID_SAH, tab);
		/* And the same for the lower part */
		gfar_set_attribute(value->h_source[3] << 16 |
				   value->h_source[4] << 8 |
				   value->h_source[5],
				   lower_temp_mask, RQFCR_PID_SAL, tab);
	}
	/* Destination address */
	if (!is_broadcast_ether_addr(mask->h_dest)) {
		/* Special for destination is limited broadcast */
		if ((is_broadcast_ether_addr(value->h_dest) &&
		     is_zero_ether_addr(mask->h_dest))) {
			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
		} else {
			if (is_zero_ether_addr(mask->h_dest)) {
				upper_temp_mask = 0xFFFFFFFF;
				lower_temp_mask = 0xFFFFFFFF;
			} else {
				upper_temp_mask = mask->h_dest[0] << 16 |
						  mask->h_dest[1] << 8 |
						  mask->h_dest[2];
				lower_temp_mask = mask->h_dest[3] << 16 |
						  mask->h_dest[4] << 8 |
						  mask->h_dest[5];
			}

			/* Upper 24bit */
			gfar_set_attribute(value->h_dest[0] << 16 |
					   value->h_dest[1] << 8 |
					   value->h_dest[2],
					   upper_temp_mask, RQFCR_PID_DAH, tab);
			/* And the same for the lower part */
			gfar_set_attribute(value->h_dest[3] << 16 |
					   value->h_dest[4] << 8 |
					   value->h_dest[5],
					   lower_temp_mask, RQFCR_PID_DAL, tab);
		}
	}

	gfar_set_attribute(be16_to_cpu(value->h_proto),
			   be16_to_cpu(mask->h_proto),
			   RQFCR_PID_ETY, tab);
}

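/* Helpers that extract the VLAN ID, CFI and priority fields (and their masks)
 * from an ethtool flow spec's VLAN TCI.
 */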
static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
				 struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);

		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;

		/* Separate the fields */
		id = vlan_tci_vid(rule);
		id_mask = vlan_tci_vidm(rule);
		cfi = vlan_tci_cfi(rule);
		cfi_mask = vlan_tci_cfim(rule);
		prio = vlan_tci_prio(rule);
		prio_mask = vlan_tci_priom(rule);

		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
			vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		} else if (cfi != VLAN_TAG_PRESENT &&
			   cfi_mask == VLAN_TAG_PRESENT) {
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				  &rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				  &rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
				  (struct ethtool_tcpip4_spec *)&rule->m_u,
				  tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				 (struct ethtool_usrip4_spec *) &rule->m_u,
				 tab);
		break;
	case ETHER_FLOW:
		if (vlan)
			gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
			       (struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is
	 * free space in hw
	 */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}

/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
				    struct gfar_filer_entry src[0], s32 size)
{
	while (size > 0) {
		size--;
		dst[size].ctrl = src[size].ctrl;
		dst[size].prop = src[size].prop;
	}
}

/* Delete the contents of the filer-table between start and end
 * and collapse them
 */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
	int length;

	if (end > MAX_FILER_CACHE_IDX || end < begin)
		return -EINVAL;

	end++;
	length = end - begin;

	/* Copy */
	while (end < tab->index) {
		tab->fe[begin].ctrl = tab->fe[end].ctrl;
		tab->fe[begin++].prop = tab->fe[end++].prop;
	}
	/* Fill up with don't cares */
	while (begin < tab->index) {
		tab->fe[begin].ctrl = 0x60;
		tab->fe[begin].prop = 0xFFFFFFFF;
		begin++;
	}

	tab->index -= length;
	return 0;
}

/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
				     struct filer_table *tab)
{
	if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
	    begin > MAX_FILER_CACHE_IDX)
		return -EINVAL;

	gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
				tab->index - length + 1);

	tab->index += length;
	return 0;
}

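/* Scan forward from @start for the next entry that opens a cluster (AND and
 * CLE both set) or, in the _end variant, closes one (CLE without AND).
 */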
1329static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1330{
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001331 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1332 start++) {
1333 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1334 (RQFCR_AND | RQFCR_CLE))
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001335 return start;
1336 }
1337 return -1;
1338}
1339
1340static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1341{
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001342 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1343 start++) {
1344 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1345 (RQFCR_CLE))
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001346 return start;
1347 }
1348 return -1;
1349}
1350
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001351/* Uses hardwares clustering option to reduce
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001352 * the number of filer table entries
1353 */
1354static void gfar_cluster_filer(struct filer_table *tab)
1355{
1356 s32 i = -1, j, iend, jend;
1357
1358 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1359 j = i;
1360 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001361 /* The cluster entries self and the previous one
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001362 * (a mask) must be identical!
1363 */
1364 if (tab->fe[i].ctrl != tab->fe[j].ctrl)
1365 break;
1366 if (tab->fe[i].prop != tab->fe[j].prop)
1367 break;
1368 if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
1369 break;
1370 if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
1371 break;
1372 iend = gfar_get_next_cluster_end(i, tab);
1373 jend = gfar_get_next_cluster_end(j, tab);
1374 if (jend == -1 || iend == -1)
1375 break;
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001376
1377 /* First we make some free space, where our cluster
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001378 * element should be. Then we copy it there and finally
1379 * delete in from its old location.
1380 */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001381 if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1382 -EINVAL)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001383 break;
1384
1385 gfar_copy_filer_entries(&(tab->fe[iend + 1]),
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001386 &(tab->fe[jend + 1]), jend - j);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001387
1388 if (gfar_trim_filer_entries(jend - 1,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001389 jend + (jend - j),
1390 tab) == -EINVAL)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001391 return;
1392
1393 /* Mask out cluster bit */
1394 tab->fe[iend].ctrl &= ~(RQFCR_CLE);
1395 }
1396 }
1397}
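
/* Worked example (a sketch of the intent, assuming the helpers above behave
 * as described): two rules that begin with an identical mask/value pair
 *
 *	mask M, value V (CLE), ..., last entry (CLE)	<- rule A
 *	mask M, value V (CLE), ..., last entry (CLE)	<- rule B
 *
 * are merged by appending rule B's remaining entries to rule A's cluster and
 * trimming rule B, so the shared prefix is stored only once in the table.
 */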

/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
			   struct gfar_filer_entry *a2,
			   struct gfar_filer_entry *b1,
			   struct gfar_filer_entry *b2, u32 mask)
{
	u32 temp[4];

	temp[0] = a1->ctrl & mask;
	temp[1] = a2->ctrl & mask;
	temp[2] = b1->ctrl & mask;
	temp[3] = b2->ctrl & mask;

	a1->ctrl &= ~mask;
	a2->ctrl &= ~mask;
	b1->ctrl &= ~mask;
	b2->ctrl &= ~mask;

	a1->ctrl |= temp[1];
	a2->ctrl |= temp[0];
	b1->ctrl |= temp[3];
	b2->ctrl |= temp[2];
}

/* Generate a list of mask values with their start and end of validity, and
 * with block as an indicator for parts belonging together (glued by ANDs)
 * in mask_table
 */
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
				    struct filer_table *tab)
{
	u32 i, and_index = 0, block_index = 1;

	for (i = 0; i < tab->index; i++) {

		/* LSByte of control = 0 sets a mask */
		if (!(tab->fe[i].ctrl & 0xF)) {
			mask_table[and_index].mask = tab->fe[i].prop;
			mask_table[and_index].start = i;
			mask_table[and_index].block = block_index;
			if (and_index >= 1)
				mask_table[and_index - 1].end = i - 1;
			and_index++;
		}
		/* Cluster starts and ends are kept separate because they
		 * must hold their position
		 */
		if (tab->fe[i].ctrl & RQFCR_CLE)
			block_index++;
		/* A cleared AND bit indicates the end of a dependent block */
		if (!(tab->fe[i].ctrl & RQFCR_AND))
			block_index++;
	}

	mask_table[and_index - 1].end = i - 1;

	return and_index;
}

/* Sorts the entries of mask_table by the values of the masks.
 * Important: The 0xFF80 flags of the first and last entry of a
 * block must hold their position (which queue, CLusterEnable, ReJEct,
 * AND)
 */
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
				 struct filer_table *temp_table, u32 and_index)
{
	/* Pointer to compare function (_asc or _desc) */
	int (*gfar_comp)(const void *, const void *);

	u32 i, size = 0, start = 0, prev = 1;
	u32 old_first, old_last, new_first, new_last;

	gfar_comp = &gfar_comp_desc;

	for (i = 0; i < and_index; i++) {
		if (prev != mask_table[i].block) {
			old_first = mask_table[start].start + 1;
			old_last = mask_table[i - 1].end;
			sort(mask_table + start, size,
			     sizeof(struct gfar_mask_entry),
			     gfar_comp, &gfar_swap);

			/* Toggle the sort order for every block. This makes
			 * the optimization more effective!
			 */
			if (gfar_comp == gfar_comp_desc)
				gfar_comp = &gfar_comp_asc;
			else
				gfar_comp = &gfar_comp_desc;

			new_first = mask_table[start].start + 1;
			new_last = mask_table[i - 1].end;

			gfar_swap_bits(&temp_table->fe[new_first],
				       &temp_table->fe[old_first],
				       &temp_table->fe[new_last],
				       &temp_table->fe[old_last],
				       RQFCR_QUEUE | RQFCR_CLE |
				       RQFCR_RJE | RQFCR_AND);

			start = i;
			size = 0;
		}
		size++;
		prev = mask_table[i].block;
	}
}

/* Reduces the number of masks needed in the filer table to save entries.
 * This is done by sorting the masks of a dependent block. A dependent block
 * is identified by gluing ANDs or CLE. The sorting order toggles after every
 * block. Of course, entries in scope of a mask must change their location
 * with it.
 */
static int gfar_optimize_filer_masks(struct filer_table *tab)
{
	struct filer_table *temp_table;
	struct gfar_mask_entry *mask_table;

	u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
	s32 ret = 0;

	/* We need a copy of the filer table because
	 * we want to change its order
	 */
	temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
	if (temp_table == NULL)
		return -ENOMEM;

	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
			     sizeof(struct gfar_mask_entry), GFP_KERNEL);

	if (mask_table == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	and_index = gfar_generate_mask_table(mask_table, tab);

	gfar_sort_mask_table(mask_table, temp_table, and_index);

	/* Now we can copy the data from our duplicated filer table to
	 * the real one in the order the mask table says
	 */
	for (i = 0; i < and_index; i++) {
		size = mask_table[i].end - mask_table[i].start + 1;
		gfar_copy_filer_entries(&(tab->fe[j]),
				&(temp_table->fe[mask_table[i].start]), size);
		j += size;
	}

	/* And finally we just have to check for duplicated masks and drop
	 * the second ones
	 */
	for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			previous_mask = i++;
			break;
		}
	}
	for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
				/* Two identical ones found!
				 * So drop the second one!
				 */
				gfar_trim_filer_entries(i, i, tab);
			} else
				/* Not identical! */
				previous_mask = i;
		}
	}

	kfree(mask_table);
end:	kfree(temp_table);
	return ret;
}

/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
				  struct filer_table *tab)
{
	u32 i = 0;

	if (tab->index > MAX_FILER_IDX - 1)
		return -EBUSY;

	/* Fill regular entries */
	for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
	/* Fill the rest with fall-throughs */
	for (; i < MAX_FILER_IDX; i++)
		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
	/* Last entry must be default accept
	 * because that's what people expect
	 */
	gfar_write_filer(priv, i, 0x20, 0x0);

	return 0;
}

static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
				 struct gfar_private *priv)
{
	if (flow->flow_type & FLOW_EXT) {
		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
			netdev_warn(priv->ndev,
				    "User-specific data not supported!\n");
		if (~flow->m_ext.vlan_etype)
			netdev_warn(priv->ndev,
				    "VLAN-etype not supported!\n");
	}
	if (flow->flow_type == IP_USER_FLOW)
		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
			netdev_warn(priv->ndev,
				    "IP-Version differing from IPv4 not supported!\n");

	return 0;
}

static int gfar_process_filer_changes(struct gfar_private *priv)
{
	struct ethtool_flow_spec_container *j;
	struct filer_table *tab;
	s32 i = 0;
	s32 ret = 0;

	/* So index is set to zero, too! */
	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return -ENOMEM;

	/* Now convert the existing filer data from flow_spec into
	 * the filer table's binary format
	 */
	list_for_each_entry(j, &priv->rx_list.list, list) {
		ret = gfar_convert_to_filer(&j->fs, tab);
		if (ret == -EBUSY) {
			netdev_err(priv->ndev,
				   "Rule not added: No free space!\n");
			goto end;
		}
		if (ret == -1) {
			netdev_err(priv->ndev,
				   "Rule not added: Unsupported Flow-type!\n");
			goto end;
		}
	}

	i = tab->index;

	/* Optimizations to save entries */
	gfar_cluster_filer(tab);
	gfar_optimize_filer_masks(tab);

	pr_debug("\tSummary:\n"
		 "\tData on hardware: %d\n"
		 "\tCompression rate: %d%%\n",
		 tab->index, 100 - (100 * tab->index) / i);

	/* Write everything to hardware */
	ret = gfar_write_filer_table(priv, tab);
	if (ret == -EBUSY) {
		netdev_err(priv->ndev, "Rule not added: No free space!\n");
		goto end;
	}

end:
	kfree(tab);
	return ret;
}

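/* The masks in an ethtool flow spec use the opposite polarity from what the
 * conversion code above expects, so every mask byte is flipped here, both
 * when a rule is inserted (gfar_add_cls) and when it is handed back to user
 * space (gfar_get_cls).
 */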
static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	u32 i = 0;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xFF;

	flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
	flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right @location */
	if (list_empty(&priv->rx_list.list)) {
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	priv->rx_list.count++;
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	return ret;

clean_list:
	priv->rx_list.count--;
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}

static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
			gfar_invert_masks(&cmd->fs);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls_all(struct gfar_private *priv,
			    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 i = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (i == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[i] = comp->fs.location;
		i++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = i;

	return 0;
}

static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	if (test_bit(GFAR_RESETTING, &priv->state))
		return -EBUSY;

	mutex_lock(&priv->rx_queue_access);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = gfar_set_hash_opts(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
		     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
		    cmd->fs.location >= MAX_FILER_IDX) {
			ret = -EINVAL;
			break;
		}
		ret = gfar_add_cls(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gfar_del_cls(priv, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->rx_queue_access);

	return ret;
}
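
/* For reference, these handlers are reached through the ethtool n-tuple
 * interface; a hypothetical session (interface name and values made up for
 * illustration) might look like:
 *
 *	ethtool -N eth0 flow-type udp4 dst-port 319 action 1	(ETHTOOL_SRXCLSRLINS)
 *	ethtool -N eth0 delete 0				(ETHTOOL_SRXCLSRLDEL)
 *	ethtool -n eth0						(rule dump via gfar_get_nfc)
 */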

static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			u32 *rule_locs)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->num_rx_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = priv->rx_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gfar_get_cls(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gfar_get_cls_all(priv, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int gfar_phc_index = -1;
EXPORT_SYMBOL(gfar_phc_index);

static int gfar_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = gfar_phc_index;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}
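
/* Usage note (an assumption about typical deployment, not driver policy):
 * when the eTSEC timer block is present, "ethtool -T <iface>" reports the
 * phc index published above, and applications enable hardware timestamping
 * through the SIOCSHWTSTAMP ioctl with HWTSTAMP_TX_ON / HWTSTAMP_FILTER_ALL,
 * matching the capability bits advertised here.
 */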

const struct ethtool_ops gfar_ethtool_ops = {
	.get_settings = gfar_gsettings,
	.set_settings = gfar_ssettings,
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_pauseparam = gfar_gpauseparam,
	.set_pauseparam = gfar_spauseparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
#endif
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
	.get_ts_info = gfar_get_ts_info,
};