/*
 * drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 * Gianfar Ethernet Driver
 * Ethtool support for Gianfar Enet
 * Based on e1000 ethtool support
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 *
 * This software may be used and distributed according to
 * the terms of the GNU Public License, Version 2, incorporated herein
 * by reference.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>

#include "gianfar.h"

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf);
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo);

static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-crc-errors",
	"rx-overrun-errors",
	"rx-busy-errors",
	"rx-babbling-errors",
	"rx-truncated-frames",
	"ethernet-bus-error",
	"tx-babbling-errors",
	"tx-underrun-errors",
	"rx-skb-missing-errors",
	"tx-timeout-errors",
	"tx-rx-64-frames",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"rx-bytes",
	"rx-packets",
	"rx-fcs-errors",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-unknown-op-code",
	"rx-alignment-error",
	"rx-frame-length-error",
	"rx-code-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"rx-jabber-frames",
	"rx-dropped-frames",
	"tx-byte-counter",
	"tx-packets",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"reserved",
	"tx-dropped-frames",
	"tx-jabber-frames",
	"tx-fcs-errors",
	"tx-control-frames",
	"tx-oversize-frames",
	"tx-undersize-frames",
	"tx-fragmented-frames",
};

/* Fill in a buffer with the strings which correspond to the
 * stats
 */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
		memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
	else
		memcpy(buf, stat_gstrings,
		       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}

/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	atomic64_t *extra = (atomic64_t *)&priv->extra_stats;

	for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
		buf[i] = atomic64_read(&extra[i]);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;

		for (; i < GFAR_STATS_LEN; i++, rmon++)
			buf[i] = (u64) gfar_read(rmon);
	}
}

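/* Usage sketch (not part of the driver): the counters filled in above
 * back the standard ethtool stats interface, so "ethtool -S eth0"
 * prints each stat_gstrings[] name next to the matching buf[] value,
 * software extra_stats first, then the RMON hardware counters when
 * the device has them.
 */
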
static int gfar_sset_count(struct net_device *dev, int sset)
{
	struct gfar_private *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
			return GFAR_STATS_LEN;
		else
			return GFAR_EXTRA_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, gfar_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}


static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (NULL == phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}


/* Return the current settings in the ethtool_cmd structure */
static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;

	if (NULL == phydev)
		return -ENODEV;
	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	/* eTSEC 1.7 and older versions have only one txic and one rxic
	 * register, although they support multiple queues
	 */
	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);

	return phy_ethtool_gset(phydev, cmd);
}

/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
	return sizeof(struct gfar);
}

/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *regbuf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
	u32 *buf = (u32 *) regbuf;

	for (i = 0; i < sizeof(struct gfar) / sizeof(u32); i++)
		buf[i] = gfar_read(&theregs[i]);
}

/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at
 */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
				     unsigned int usecs)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if usecs > 0
	 */
	return (usecs * 1000 + count - 1) / count;
}

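/* Worked example for the conversion above (a sketch; the GFAR_*_TIME
 * tick constants live in gianfar.h and the value quoted here is an
 * assumption): with GFAR_GBIT_TIME = 512, converting 100 usecs at
 * gigabit speed gives (100 * 1000 + 512 - 1) / 512 = 195 ticks; the
 * "+ count - 1" term rounds up, so any non-zero usecs maps to at
 * least one tick.
 */
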
/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
				     unsigned int ticks)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if ticks is > 0
	 */
	return (ticks * count) / 1000;
}

/* Get the coalescing parameters, and put them in the cvals
 * structure.
 */
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;
	unsigned long rxtime;
	unsigned long rxcount;
	unsigned long txtime;
	unsigned long txcount;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (NULL == priv->phydev)
		return -ENODEV;

	rx_queue = priv->rx_queue[0];
	tx_queue = priv->tx_queue[0];

	rxtime = get_ictt_value(rx_queue->rxic);
	rxcount = get_icft_value(rx_queue->rxic);
	txtime = get_ictt_value(tx_queue->txic);
	txcount = get_icft_value(tx_queue->txic);
	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
	cvals->rx_max_coalesced_frames = rxcount;

	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
	cvals->tx_max_coalesced_frames = txcount;

	cvals->use_adaptive_rx_coalesce = 0;
	cvals->use_adaptive_tx_coalesce = 0;

	cvals->pkt_rate_low = 0;
	cvals->rx_coalesce_usecs_low = 0;
	cvals->rx_max_coalesced_frames_low = 0;
	cvals->tx_coalesce_usecs_low = 0;
	cvals->tx_max_coalesced_frames_low = 0;

	/* When the packet rate is below pkt_rate_high but above
	 * pkt_rate_low (both measured in packets per second) the
	 * normal {rx,tx}_* coalescing parameters are used.
	 */

	/* When the packet rate (measured in packets per second)
	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
	 * used.
	 */
	cvals->pkt_rate_high = 0;
	cvals->rx_coalesce_usecs_high = 0;
	cvals->rx_max_coalesced_frames_high = 0;
	cvals->tx_coalesce_usecs_high = 0;
	cvals->tx_max_coalesced_frames_high = 0;

	/* How often to do adaptive coalescing packet rate sampling,
	 * measured in seconds. Must not be zero.
	 */
	cvals->rate_sample_interval = 0;

	return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	/* Set up rx coalescing */
	/* As of now, coalescing is enabled/disabled for all queues
	 * together in the eTSEC2 case; this will be refined along
	 * with the ethtool interface
	 */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	if (NULL == priv->phydev)
		return -ENODEV;

	/* Check the bounds of the values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	/* Check the bounds of the values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	gfar_configure_coalescing_all(priv);

	return 0;
}

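/* Example (illustration only): with the bounds above, a request such
 * as "ethtool -C eth0 rx-usecs 100 rx-frames 16" enables rx coalescing
 * on every queue and programs rxic via mk_ic_value(16,
 * gfar_usecs2ticks(priv, 100)); passing 0 for either value disables
 * coalescing entirely.
 */
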
/* Fills in rvals with the current ring parameters. Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver
 */
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

	/* Values changeable by the user. The valid values are
	 * in the range 1 to the "*_max_pending" counterpart above.
	 */
	rvals->rx_pending = rx_queue->rx_ring_size;
	rvals->rx_mini_pending = rx_queue->rx_ring_size;
	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
	rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in motion.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	/* Change the sizes */
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP) {
		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}

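/* Example (illustration only): "ethtool -G eth0 rx 512 tx 512" lands
 * here; both values must be powers of 2 no larger than
 * GFAR_RX_MAX_RING_SIZE/GFAR_TX_MAX_RING_SIZE, and a running interface
 * is stopped and restarted so the rings are reallocated at the new
 * size.
 */
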
static void gfar_gpauseparam(struct net_device *dev,
			     struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);

	epause->autoneg = !!priv->pause_aneg_en;
	epause->rx_pause = !!priv->rx_pause_en;
	epause->tx_pause = !!priv->tx_pause_en;
}

static int gfar_spauseparam(struct net_device *dev,
			    struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 oldadv, newadv;

	if (!(phydev->supported & SUPPORTED_Pause) ||
	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
	     (epause->rx_pause != epause->tx_pause)))
		return -EINVAL;

	priv->rx_pause_en = priv->tx_pause_en = 0;
	if (epause->rx_pause) {
		priv->rx_pause_en = 1;

		if (epause->tx_pause) {
			priv->tx_pause_en = 1;
			/* FLOW_CTRL_RX & TX */
			newadv = ADVERTISED_Pause;
		} else /* FLOW_CTRL_RX */
			newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	} else if (epause->tx_pause) {
		priv->tx_pause_en = 1;
		/* FLOW_CTRL_TX */
		newadv = ADVERTISED_Asym_Pause;
	} else
		newadv = 0;

	if (epause->autoneg)
		priv->pause_aneg_en = 1;
	else
		priv->pause_aneg_en = 0;

	oldadv = phydev->advertising &
		 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	if (oldadv != newadv) {
		phydev->advertising &=
			~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		phydev->advertising |= newadv;
		if (phydev->autoneg)
			/* inform link partner of our
			 * new flow ctrl settings
			 */
			return phy_start_aneg(phydev);

		if (!epause->autoneg) {
			u32 tempval;
			tempval = gfar_read(&regs->maccfg1);
			tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
			if (priv->tx_pause_en)
				tempval |= MACCFG1_TX_FLOW;
			if (priv->rx_pause_en)
				tempval |= MACCFG1_RX_FLOW;
			gfar_write(&regs->maccfg1, tempval);
		}
	}

	return 0;
}

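/* Note: the advertisement handling above follows the 802.3 flow
 * control scheme, e.g. "ethtool -A eth0 autoneg on rx on tx on"
 * advertises symmetric pause (ADVERTISED_Pause) and renegotiates with
 * the link partner instead of forcing the MACCFG1 flow control bits
 * directly.
 */
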
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	int err = 0;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_RXCSUM)))
		return 0;

	dev->features = features;

	if (dev->flags & IFF_UP) {
		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}

static uint32_t gfar_get_msglevel(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->msg_enable = data;
}

#ifdef CONFIG_PM
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}

static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    wol->wolopts != 0)
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

	spin_lock_irqsave(&priv->bflock, flags);
	priv->wol_en = !!device_may_wakeup(&dev->dev);
	spin_unlock_irqrestore(&priv->bflock, flags);

	return 0;
}
#endif

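/* Usage sketch (illustration only): when the controller advertises
 * FSL_GIANFAR_DEV_HAS_MAGIC_PACKET, "ethtool -s eth0 wol g" ends up
 * in gfar_set_wol() above and arms magic-packet wake-up; "wol d"
 * disarms it.
 */
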
static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
{
	u32 fcr = 0x0, fpr = FPR_FILER_MASK;

	if (ethflow & RXH_L2DA) {
		fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;

		fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_VLAN) {
		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_IP_SRC) {
		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & (RXH_IP_DST)) {
		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L3_PROTO) {
		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_0_1) {
		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_2_3) {
		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}
}

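/* Each RXH_* flag above programs a single filer entry (two for the L2
 * destination address, which is split into 24-bit high/low halves):
 * the RQFCR word asks the filer to hash on that field while the RQFPR
 * property stays fully masked (FPR_FILER_MASK), and cur_filer_idx
 * walks downwards so the hash rules land below the rules that were
 * already programmed.
 */
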
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
				       u64 class)
{
	unsigned int last_rule_idx = priv->cur_filer_idx;
	unsigned int cmp_rqfpr;
	unsigned int *local_rqfpr;
	unsigned int *local_rqfcr;
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;
	int ret = 1;

	local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
		break;
	default:
		netdev_err(priv->ndev,
			   "Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] ==
		     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
		    (priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		netdev_err(priv->ndev,
			   "No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* If a match was found, it marks the start of a cluster rule;
	 * if that cluster was already programmed, we need to overwrite
	 * these rules
	 */
	for (l = i+1; l < MAX_FILER_IDX; l++) {
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
					     RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
					 priv->ftp_rqfpr[l]);
			break;
		}

		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    (priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;
	last_rule_idx = l;

	/* hash rules */
	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j+1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				 local_rqfcr[k], local_rqfpr[k]);
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}

static int gfar_set_hash_opts(struct gfar_private *priv,
			      struct ethtool_rxnfc *cmd)
{
	/* write the filer rules here */
	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
		return -EINVAL;

	return 0;
}

static int gfar_check_filer_hardware(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 i;

	/* Check if we are in FIFO mode */
	i = gfar_read(&regs->ecntrl);
	i &= ECNTRL_FIFM;
	if (i == ECNTRL_FIFM) {
		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}
	/* Or in standard mode */
	else {
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK;
		if (i == RCTRL_PRSDEP_MASK) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}

	/* Sets the properties for arbitrary filer rule
	 * to the first 4 Layer 4 Bytes
	 */
	gfar_write(&regs->rbifx, 0xC0C1C2C3);
	return 0;
}

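/* Note on the RBIFX value above (an assumption about the register's
 * byte layout, not taken from this file): each byte is read as a
 * 2-bit extraction control plus a 6-bit offset, so 0xC0..0xC3 selects
 * bytes 0-3 counted from the start of the Layer 4 header, which is
 * what the RQFCR_PID_ARB match on l4_4_bytes in gfar_set_user_ip()
 * below relies on.
 */
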
static int gfar_comp_asc(const void *a, const void *b)
{
	return memcmp(a, b, 4);
}

static int gfar_comp_desc(const void *a, const void *b)
{
	return -memcmp(a, b, 4);
}

static void gfar_swap(void *a, void *b, int size)
{
	u32 *_a = a;
	u32 *_b = b;

	swap(_a[0], _b[0]);
	swap(_a[1], _b[1]);
	swap(_a[2], _b[2]);
	swap(_a[3], _b[3]);
}

/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	tab->fe[tab->index].prop = mask;
	tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
				   RQFCR_AND;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
				       struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

/* For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple
 * For a don't-care mask it gives us a 0
 *
 * The don't-care check and the mask adjustment if mask=0 are done for
 * VLAN and MAC stuff on an upper level (due to missing information on
 * this level). For these guys we can discard them if they are value=0
 * and mask=0.
 *
 * Further, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
			       struct filer_table *tab)
{
	switch (flag) {
	/* 3bit */
	case RQFCR_PID_PRI:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_PRI_MASK;
		break;
	/* 8bit */
	case RQFCR_PID_L4P:
	case RQFCR_PID_TOS:
		if (!~(mask | RQFCR_PID_L4P_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_L4P_MASK;
		break;
	/* 12bit */
	case RQFCR_PID_VID:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_VID_MASK;
		break;
	/* 16bit */
	case RQFCR_PID_DPT:
	case RQFCR_PID_SPT:
	case RQFCR_PID_ETY:
		if (!~(mask | RQFCR_PID_PORT_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_PORT_MASK;
		break;
	/* 24bit */
	case RQFCR_PID_DAH:
	case RQFCR_PID_DAL:
	case RQFCR_PID_SAH:
	case RQFCR_PID_SAL:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_MAC_MASK;
		break;
	/* for all real 32bit masks */
	default:
		if (!~mask)
			return;
		if (!mask)
			mask = ~0;
		break;
	}
	gfar_set_general_attribute(value, mask, flag, tab);
}

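/* Worked example for the one-padding above (a sketch; it assumes
 * RQFCR_PID_VID_MASK covers the bits outside the 12-bit VID field):
 * a rule matching VLAN id 5 arrives as value=5, mask=0x00000FFF;
 * "mask |= RQFCR_PID_VID_MASK" one-pads the upper 20 bits so the
 * comparison stays confined to the VID while the mask is fully
 * populated, which, per the comment above, the hardware handles more
 * efficiently.
 */
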
/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
			      struct ethtool_tcpip4_spec *mask,
			      struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(be16_to_cpu(value->pdst),
			   be16_to_cpu(mask->pdst),
			   RQFCR_PID_DPT, tab);
	gfar_set_attribute(be16_to_cpu(value->psrc),
			   be16_to_cpu(mask->psrc),
			   RQFCR_PID_SPT, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
			     struct ethtool_usrip4_spec *mask,
			     struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
	gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
			   be32_to_cpu(mask->l4_4_bytes),
			   RQFCR_PID_ARB, tab);
}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
			   struct filer_table *tab)
{
	u32 upper_temp_mask = 0;
	u32 lower_temp_mask = 0;

	/* Source address */
	if (!is_broadcast_ether_addr(mask->h_source)) {
		if (is_zero_ether_addr(mask->h_source)) {
			upper_temp_mask = 0xFFFFFFFF;
			lower_temp_mask = 0xFFFFFFFF;
		} else {
			upper_temp_mask = mask->h_source[0] << 16 |
					  mask->h_source[1] << 8  |
					  mask->h_source[2];
			lower_temp_mask = mask->h_source[3] << 16 |
					  mask->h_source[4] << 8  |
					  mask->h_source[5];
		}
		/* Upper 24bit */
		gfar_set_attribute(value->h_source[0] << 16 |
				   value->h_source[1] << 8  |
				   value->h_source[2],
				   upper_temp_mask, RQFCR_PID_SAH, tab);
		/* And the same for the lower part */
		gfar_set_attribute(value->h_source[3] << 16 |
				   value->h_source[4] << 8  |
				   value->h_source[5],
				   lower_temp_mask, RQFCR_PID_SAL, tab);
	}
	/* Destination address */
	if (!is_broadcast_ether_addr(mask->h_dest)) {
		/* Special for destination is limited broadcast */
		if ((is_broadcast_ether_addr(value->h_dest) &&
		     is_zero_ether_addr(mask->h_dest))) {
			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
		} else {
			if (is_zero_ether_addr(mask->h_dest)) {
				upper_temp_mask = 0xFFFFFFFF;
				lower_temp_mask = 0xFFFFFFFF;
			} else {
				upper_temp_mask = mask->h_dest[0] << 16 |
						  mask->h_dest[1] << 8  |
						  mask->h_dest[2];
				lower_temp_mask = mask->h_dest[3] << 16 |
						  mask->h_dest[4] << 8  |
						  mask->h_dest[5];
			}

			/* Upper 24bit */
			gfar_set_attribute(value->h_dest[0] << 16 |
					   value->h_dest[1] << 8  |
					   value->h_dest[2],
					   upper_temp_mask, RQFCR_PID_DAH, tab);
			/* And the same for the lower part */
			gfar_set_attribute(value->h_dest[3] << 16 |
					   value->h_dest[4] << 8  |
					   value->h_dest[5],
					   lower_temp_mask, RQFCR_PID_DAL, tab);
		}
	}

	gfar_set_attribute(be16_to_cpu(value->h_proto),
			   be16_to_cpu(mask->h_proto),
			   RQFCR_PID_ETY, tab);
}

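/* Example of the 24-bit split above (illustrative address): for
 * 00:04:9f:01:02:03 the upper half passed with RQFCR_PID_SAH/DAH is
 * 0x00049f and the lower half passed with RQFCR_PID_SAL/DAL is
 * 0x010203, each with a correspondingly split mask.
 */
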
static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
				 struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);

		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;

		/* Separate the fields */
		id = vlan_tci_vid(rule);
		id_mask = vlan_tci_vidm(rule);
		cfi = vlan_tci_cfi(rule);
		cfi_mask = vlan_tci_cfim(rule);
		prio = vlan_tci_prio(rule);
		prio_mask = vlan_tci_priom(rule);

		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
			vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		} else if (cfi != VLAN_TAG_PRESENT &&
			   cfi_mask == VLAN_TAG_PRESENT) {
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				  &rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				  &rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
				  (struct ethtool_tcpip4_spec *)&rule->m_u,
				  tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				 (struct ethtool_usrip4_spec *) &rule->m_u,
				 tab);
		break;
	case ETHER_FLOW:
		if (vlan)
			gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
			       (struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is
	 * free space in hw
	 */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}

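/* Rough picture of what the conversion above produces for a simple
 * "ethtool -U eth0 flow-type tcp4 dst-port 80 action 1" rule (entry
 * values are illustrative, not register dumps): a mask/parse-bits pair
 * matching RQFPR_IPV4 | RQFPR_TCP, a mask/attribute pair for the
 * one-padded destination port, the trailing RQFCR_AND stripped from
 * the last entry, and queue index 1 shifted into the control word via
 * "ring_cookie << 10".
 */
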
/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
				    struct gfar_filer_entry src[0], s32 size)
{
	while (size > 0) {
		size--;
		dst[size].ctrl = src[size].ctrl;
		dst[size].prop = src[size].prop;
	}
}

/* Delete the contents of the filer-table between start and end
 * and collapse them
 */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
	int length;

	if (end > MAX_FILER_CACHE_IDX || end < begin)
		return -EINVAL;

	end++;
	length = end - begin;

	/* Copy */
	while (end < tab->index) {
		tab->fe[begin].ctrl = tab->fe[end].ctrl;
		tab->fe[begin++].prop = tab->fe[end++].prop;
	}
	/* Fill up with don't cares */
	while (begin < tab->index) {
		tab->fe[begin].ctrl = 0x60;
		tab->fe[begin].prop = 0xFFFFFFFF;
		begin++;
	}

	tab->index -= length;
	return 0;
}

/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
				     struct filer_table *tab)
{
	if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
	    begin > MAX_FILER_CACHE_IDX)
		return -EINVAL;

	gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
				tab->index - length + 1);

	tab->index += length;
	return 0;
}

1306static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1307{
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001308 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1309 start++) {
1310 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1311 (RQFCR_AND | RQFCR_CLE))
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001312 return start;
1313 }
1314 return -1;
1315}
1316
1317static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1318{
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001319 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1320 start++) {
1321 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1322 (RQFCR_CLE))
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001323 return start;
1324 }
1325 return -1;
1326}

/* Uses the hardware's clustering option to reduce
 * the number of filer table entries
 */
static void gfar_cluster_filer(struct filer_table *tab)
{
	s32 i = -1, j, iend, jend;

	while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
		j = i;
		while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
			/* The cluster entry itself and the previous one
			 * (a mask) must be identical!
			 */
			if (tab->fe[i].ctrl != tab->fe[j].ctrl)
				break;
			if (tab->fe[i].prop != tab->fe[j].prop)
				break;
			if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
				break;
			if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
				break;
			iend = gfar_get_next_cluster_end(i, tab);
			jend = gfar_get_next_cluster_end(j, tab);
			if (jend == -1 || iend == -1)
				break;

			/* First we make some free space where our cluster
			 * elements should go. Then we copy them there and
			 * finally delete them from their old location.
			 */
			if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
			    -EINVAL)
				break;

			gfar_copy_filer_entries(&(tab->fe[iend + 1]),
						&(tab->fe[jend + 1]), jend - j);

			if (gfar_trim_filer_entries(jend - 1,
						    jend + (jend - j),
						    tab) == -EINVAL)
				return;

			/* Mask out cluster bit */
			tab->fe[iend].ctrl &= ~(RQFCR_CLE);
		}
	}
}
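
/* Sketch of the merge (layout simplified for illustration):
 *
 *   before:  mask M, start, body1, end | mask M, start, body2, end
 *   after:   mask M, start, body1, body2, end
 *
 * The duplicated mask, start and end entries of the second cluster are
 * dropped, so the shared prologue is stored only once.
 */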

/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
			   struct gfar_filer_entry *a2,
			   struct gfar_filer_entry *b1,
			   struct gfar_filer_entry *b2, u32 mask)
{
	u32 temp[4];

	temp[0] = a1->ctrl & mask;
	temp[1] = a2->ctrl & mask;
	temp[2] = b1->ctrl & mask;
	temp[3] = b2->ctrl & mask;

	a1->ctrl &= ~mask;
	a2->ctrl &= ~mask;
	b1->ctrl &= ~mask;
	b2->ctrl &= ~mask;

	a1->ctrl |= temp[1];
	a2->ctrl |= temp[0];
	b1->ctrl |= temp[3];
	b2->ctrl |= temp[2];
}
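
/* Example with assumed values: for mask == RQFCR_AND, a1->ctrl == 0x0
 * and a2->ctrl == RQFCR_AND, the call leaves a1->ctrl == RQFCR_AND and
 * a2->ctrl == 0x0; every bit outside the mask keeps its old value, and
 * b1/b2 are exchanged the same way.
 */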

/* Generate a list of mask values with their start and end of validity,
 * using block as an indicator for parts that belong together
 * (glued by ANDs), in mask_table
 */
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
				    struct filer_table *tab)
{
	u32 i, and_index = 0, block_index = 1;

	for (i = 0; i < tab->index; i++) {
		/* LSByte of control = 0 sets a mask */
		if (!(tab->fe[i].ctrl & 0xF)) {
			mask_table[and_index].mask = tab->fe[i].prop;
			mask_table[and_index].start = i;
			mask_table[and_index].block = block_index;
			if (and_index >= 1)
				mask_table[and_index - 1].end = i - 1;
			and_index++;
		}
		/* Cluster starts and ends are separated because they
		 * have to hold their position
		 */
		if (tab->fe[i].ctrl & RQFCR_CLE)
			block_index++;
		/* An unset AND indicates the end of a dependent block */
		if (!(tab->fe[i].ctrl & RQFCR_AND))
			block_index++;
	}

	mask_table[and_index - 1].end = i - 1;

	return and_index;
}
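
/* Synthetic example: for a table "mask M0, rule, mask M1, rule(AND),
 * rule" the result would be one gfar_mask_entry per mask, roughly
 *   { .mask = M0, .start = 0, .end = 1, .block = n }
 *   { .mask = M1, .start = 2, .end = 4, .block = n + k }
 * with block numbers that differ whenever the entries in between are
 * not glued together by AND or a cluster boundary is crossed.
 */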

/* Sorts the entries of mask_table by the values of the masks.
 * Important: The 0xFF80 flags of the first and last entry of a
 * block must hold their position (which queue, CLusterEnable, ReJEct,
 * AND)
 */
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
				 struct filer_table *temp_table, u32 and_index)
{
	/* Pointer to compare function (_asc or _desc) */
	int (*gfar_comp)(const void *, const void *);

	u32 i, size = 0, start = 0, prev = 1;
	u32 old_first, old_last, new_first, new_last;

	gfar_comp = &gfar_comp_desc;

	for (i = 0; i < and_index; i++) {
		if (prev != mask_table[i].block) {
			old_first = mask_table[start].start + 1;
			old_last = mask_table[i - 1].end;
			sort(mask_table + start, size,
			     sizeof(struct gfar_mask_entry),
			     gfar_comp, &gfar_swap);

			/* Toggle the sort order for every block. This
			 * makes the optimization more efficient!
			 */
			if (gfar_comp == gfar_comp_desc)
				gfar_comp = &gfar_comp_asc;
			else
				gfar_comp = &gfar_comp_desc;

			new_first = mask_table[start].start + 1;
			new_last = mask_table[i - 1].end;

			gfar_swap_bits(&temp_table->fe[new_first],
				       &temp_table->fe[old_first],
				       &temp_table->fe[new_last],
				       &temp_table->fe[old_last],
				       RQFCR_QUEUE | RQFCR_CLE |
				       RQFCR_RJE | RQFCR_AND);

			start = i;
			size = 0;
		}
		size++;
		prev = mask_table[i].block;
	}
}

/* Reduces the number of masks needed in the filer table to save entries
 * This is done by sorting the masks of a dependent block. A dependent block is
 * identified by gluing ANDs or CLE. The sorting order toggles after every
 * block. Of course entries in scope of a mask must change their location with
 * it.
 */
static int gfar_optimize_filer_masks(struct filer_table *tab)
{
	struct filer_table *temp_table;
	struct gfar_mask_entry *mask_table;

	u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
	s32 ret = 0;

	/* We need a copy of the filer table because
	 * we want to change its order
	 */
	temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
	if (temp_table == NULL)
		return -ENOMEM;

	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
			     sizeof(struct gfar_mask_entry), GFP_KERNEL);

	if (mask_table == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	and_index = gfar_generate_mask_table(mask_table, tab);

	gfar_sort_mask_table(mask_table, temp_table, and_index);

	/* Now we can copy the data from our duplicated filer table to
	 * the real one in the order the mask table says
	 */
	for (i = 0; i < and_index; i++) {
		size = mask_table[i].end - mask_table[i].start + 1;
		gfar_copy_filer_entries(&(tab->fe[j]),
					&(temp_table->fe[mask_table[i].start]), size);
		j += size;
	}

	/* Finally, check for duplicated masks and drop the second ones */
	for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			previous_mask = i++;
			break;
		}
	}
	for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
				/* Two identical ones found!
				 * So drop the second one!
				 */
				gfar_trim_filer_entries(i, i, tab);
			} else
				/* Not identical! */
				previous_mask = i;
		}
	}

	kfree(mask_table);
end:
	kfree(temp_table);
	return ret;
}
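
/* Sketched effect on a synthetic table: "mask M, ruleA, mask M, ruleB"
 * becomes "mask M, ruleA, ruleB" in the duplicate scan above, because
 * the hardware keeps applying the most recently programmed mask; every
 * dropped repetition saves one filer entry.
 */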

/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
				  struct filer_table *tab)
{
	u32 i = 0;

	if (tab->index > MAX_FILER_IDX - 1)
		return -EBUSY;

	/* Avoid inconsistent filer table to be processed */
	lock_rx_qs(priv);

	/* Fill regular entries */
	for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
	     i++)
		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
	/* Fill the rest with fall-throughs */
	for (; i < MAX_FILER_IDX - 1; i++)
		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
	/* Last entry must be default accept
	 * because that's what people expect
	 */
	gfar_write_filer(priv, i, 0x20, 0x0);

	unlock_rx_qs(priv);

	return 0;
}
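
/* For reference (register names per the gianfar driver header):
 * gfar_write_filer() programs one RQFAR/RQFCR/RQFPR triplet per index,
 * so the loops above mirror the software table into the hardware filer
 * entry by entry, padding unused slots with fall-throughs.
 */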

static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
				 struct gfar_private *priv)
{
	if (flow->flow_type & FLOW_EXT) {
		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
			netdev_warn(priv->ndev,
				    "User-specific data not supported!\n");
		if (~flow->m_ext.vlan_etype)
			netdev_warn(priv->ndev,
				    "VLAN-etype not supported!\n");
	}
	if (flow->flow_type == IP_USER_FLOW)
		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
			netdev_warn(priv->ndev,
				    "IP-Version differing from IPv4 not supported!\n");

	return 0;
}

static int gfar_process_filer_changes(struct gfar_private *priv)
{
	struct ethtool_flow_spec_container *j;
	struct filer_table *tab;
	s32 i = 0;
	s32 ret = 0;

	/* So index is set to zero, too! */
	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return -ENOMEM;

	/* Now convert the existing filer data from flow_spec into
	 * the filer table's binary format
	 */
	list_for_each_entry(j, &priv->rx_list.list, list) {
		ret = gfar_convert_to_filer(&j->fs, tab);
		if (ret == -EBUSY) {
			netdev_err(priv->ndev,
				   "Rule not added: No free space!\n");
			goto end;
		}
		if (ret == -1) {
			netdev_err(priv->ndev,
				   "Rule not added: Unsupported Flow-type!\n");
			goto end;
		}
	}

	i = tab->index;

	/* Optimizations to save entries */
	gfar_cluster_filer(tab);
	gfar_optimize_filer_masks(tab);

	pr_debug("\tSummary:\n"
		 "\tData on hardware: %d\n"
		 "\tCompression rate: %d%%\n",
		 tab->index, 100 - (100 * tab->index) / i);

	/* Write everything to hardware */
	ret = gfar_write_filer_table(priv, tab);
	if (ret == -EBUSY) {
		netdev_err(priv->ndev, "Rule not added: No free space!\n");
		goto end;
	}

end:
	kfree(tab);
	return ret;
}

static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	u32 i = 0;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xFF;

	flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
	flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
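
/* Background note: ethtool's flow-spec masks are assumed to follow the
 * "bit set => ignore this bit" convention, while the filer compares
 * exactly the bits that are set. XOR-ing every mask byte with 0xFF
 * converts between the two views, which is why both the add and the
 * get paths call this same helper.
 */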

static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right location */
	if (list_empty(&priv->rx_list.list)) {
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	priv->rx_list.count++;
	return ret;

clean_list:
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}
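
/* Illustrative userspace usage (device name and values hypothetical):
 *
 *   ethtool -N eth0 flow-type udp4 dst-port 1234 action 1 loc 7
 *
 * arrives here via ETHTOOL_SRXCLSRLINS with fs.ring_cookie == 1 and
 * fs.location == 7; "action -1" maps to RX_CLS_FLOW_DISC, dropping
 * matching frames.
 */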

static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	int ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}
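
/* Matching delete from userspace (rule ID hypothetical):
 *
 *   ethtool -N eth0 delete 7
 *
 * reaches this function through ETHTOOL_SRXCLSRLDEL with loc == 7.
 */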

static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
	struct ethtool_flow_spec_container *comp;
	int ret = -EINVAL;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
			gfar_invert_masks(&cmd->fs);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls_all(struct gfar_private *priv,
			    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 i = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (i == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[i] = comp->fs.location;
		i++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = i;

	return 0;
}
1777
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001778static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1779{
1780 struct gfar_private *priv = netdev_priv(dev);
1781 int ret = 0;
1782
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001783 mutex_lock(&priv->rx_queue_access);
1784
1785 switch (cmd->cmd) {
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001786 case ETHTOOL_SRXFH:
1787 ret = gfar_set_hash_opts(priv, cmd);
1788 break;
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001789 case ETHTOOL_SRXCLSRLINS:
Ben Hutchings3a73e492012-01-03 11:59:30 +00001790 if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
1791 cmd->fs.ring_cookie >= priv->num_rx_queues) ||
1792 cmd->fs.location >= MAX_FILER_IDX) {
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001793 ret = -EINVAL;
1794 break;
1795 }
1796 ret = gfar_add_cls(priv, &cmd->fs);
1797 break;
1798 case ETHTOOL_SRXCLSRLDEL:
1799 ret = gfar_del_cls(priv, cmd->fs.location);
1800 break;
Sandeep Gopalpet7a8b3372009-11-02 07:03:40 +00001801 default:
1802 ret = -EINVAL;
1803 }
1804
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001805 mutex_unlock(&priv->rx_queue_access);
1806
1807 return ret;
1808}

static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			u32 *rule_locs)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->num_rx_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = priv->rx_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gfar_get_cls(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gfar_get_cls_all(priv, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
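
/* Read side (device name hypothetical): "ethtool -n eth0" first issues
 * ETHTOOL_GRXCLSRLCNT and ETHTOOL_GRXCLSRLALL to enumerate rule IDs,
 * then ETHTOOL_GRXCLSRULE per ID; all of them land in the switch above.
 */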

int gfar_phc_index = -1;
EXPORT_SYMBOL(gfar_phc_index);

static int gfar_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = gfar_phc_index;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

const struct ethtool_ops gfar_ethtool_ops = {
	.get_settings = gfar_gsettings,
	.set_settings = gfar_ssettings,
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_pauseparam = gfar_gpauseparam,
	.set_pauseparam = gfar_spauseparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
#endif
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
	.get_ts_info = gfar_get_ts_info,
};