/*
 * drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 * Gianfar Ethernet Driver
 * Ethtool support for Gianfar Enet
 * Based on e1000 ethtool support
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 *
 * This software may be used and distributed according to
 * the terms of the GNU Public License, Version 2, incorporated herein
 * by reference.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>

#include "gianfar.h"

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo);

static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-crc-errors",
	"rx-overrun-errors",
	"rx-busy-errors",
	"rx-babbling-errors",
	"rx-truncated-frames",
	"ethernet-bus-error",
	"tx-babbling-errors",
	"tx-underrun-errors",
	"rx-skb-missing-errors",
	"tx-timeout-errors",
	"tx-rx-64-frames",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"rx-bytes",
	"rx-packets",
	"rx-fcs-errors",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-unknown-op-code",
	"rx-alignment-error",
	"rx-frame-length-error",
	"rx-code-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"rx-jabber-frames",
	"rx-dropped-frames",
	"tx-byte-counter",
	"tx-packets",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"reserved",
	"tx-dropped-frames",
	"tx-jabber-frames",
	"tx-fcs-errors",
	"tx-control-frames",
	"tx-oversize-frames",
	"tx-undersize-frames",
	"tx-fragmented-frames",
};

/* Fill in a buffer with the strings which correspond to the
 * stats */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
		memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
	else
		memcpy(buf, stat_gstrings,
		       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}

/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	atomic64_t *extra = (atomic64_t *)&priv->extra_stats;

	for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
		buf[i] = atomic64_read(&extra[i]);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;

		for (; i < GFAR_STATS_LEN; i++, rmon++)
			buf[i] = (u64) gfar_read(rmon);
	}
}

static int gfar_sset_count(struct net_device *dev, int sset)
{
	struct gfar_private *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
			return GFAR_STATS_LEN;
		else
			return GFAR_EXTRA_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, gfar_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}


static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (NULL == phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}


/* Return the current settings in the ethtool_cmd structure */
static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;

	if (NULL == phydev)
		return -ENODEV;
	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	/* etsec-1.7 and older versions have only one txic
	 * and rxic regs although they support multiple queues */
	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);

	return phy_ethtool_gset(phydev, cmd);
}

/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
	return sizeof (struct gfar);
}

/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *regbuf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
	u32 *buf = (u32 *) regbuf;

	for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
		buf[i] = gfar_read(&theregs[i]);
}

/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
				     unsigned int usecs)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if usecs > 0 */
	return (usecs * 1000 + count - 1) / count;
}

/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
				     unsigned int ticks)
{
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (priv->phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if ticks is > 0 */
	return (ticks * count) / 1000;
}
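
/* Worked example of the conversions above (tick period chosen for
 * illustration only, not tied to any particular GFAR_*_TIME value):
 * with a 512 ns tick, gfar_usecs2ticks(priv, 30) computes
 * (30 * 1000 + 511) / 512 = 59 ticks -- a round-up, so any non-zero
 * request stays non-zero -- and gfar_ticks2usecs(priv, 59) gives
 * (59 * 512) / 1000 = 30 us back.
 */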

/* Get the coalescing parameters, and put them in the cvals
 * structure. */
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;
	unsigned long rxtime;
	unsigned long rxcount;
	unsigned long txtime;
	unsigned long txcount;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (NULL == priv->phydev)
		return -ENODEV;

	rx_queue = priv->rx_queue[0];
	tx_queue = priv->tx_queue[0];

	rxtime  = get_ictt_value(rx_queue->rxic);
	rxcount = get_icft_value(rx_queue->rxic);
	txtime  = get_ictt_value(tx_queue->txic);
	txcount = get_icft_value(tx_queue->txic);
	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
	cvals->rx_max_coalesced_frames = rxcount;

	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
	cvals->tx_max_coalesced_frames = txcount;

	cvals->use_adaptive_rx_coalesce = 0;
	cvals->use_adaptive_tx_coalesce = 0;

	cvals->pkt_rate_low = 0;
	cvals->rx_coalesce_usecs_low = 0;
	cvals->rx_max_coalesced_frames_low = 0;
	cvals->tx_coalesce_usecs_low = 0;
	cvals->tx_max_coalesced_frames_low = 0;

	/* When the packet rate is below pkt_rate_high but above
	 * pkt_rate_low (both measured in packets per second) the
	 * normal {rx,tx}_* coalescing parameters are used.
	 */

	/* When the packet rate (measured in packets per second)
	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
	 * used.
	 */
	cvals->pkt_rate_high = 0;
	cvals->rx_coalesce_usecs_high = 0;
	cvals->rx_max_coalesced_frames_high = 0;
	cvals->tx_coalesce_usecs_high = 0;
	cvals->tx_max_coalesced_frames_high = 0;

	/* How often to do adaptive coalescing packet rate sampling,
	 * measured in seconds. Must not be zero.
	 */
	cvals->rate_sample_interval = 0;

	return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	/* Set up rx coalescing */
	/* As of now, we will enable/disable coalescing for all
	 * queues together in case of eTSEC2; this will be modified
	 * along with the ethtool interface
	 */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	if (NULL == priv->phydev)
		return -ENODEV;

	/* Check the bounds of the values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	/* Check the bounds of the values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	gfar_configure_coalescing_all(priv);

	return 0;
}
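
/* Example of how this handler is reached from user space (interface name
 * and values are illustrative only):
 *
 *   ethtool -C eth0 rx-usecs 30 rx-frames 16 tx-usecs 50 tx-frames 8
 *
 * Both the usec and the frame threshold must be non-zero for coalescing to
 * stay enabled, and requests above GFAR_MAX_COAL_USECS or
 * GFAR_MAX_COAL_FRAMES are rejected with -EINVAL as above.
 */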

/* Fills in rvals with the current ring parameters. Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

	/* Values changeable by the user. The valid values are
	 * in the range 1 to the "*_max_pending" counterpart above.
	 */
	rvals->rx_pending = rx_queue->rx_ring_size;
	rvals->rx_mini_pending = rx_queue->rx_ring_size;
	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
	rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in motion.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	/* Change the sizes */
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP)
		err = startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
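
/* Example (illustrative values): "ethtool -G eth0 rx 512 tx 512" ends up
 * here. The requested sizes must be powers of two, no larger than
 * GFAR_RX_MAX_RING_SIZE/GFAR_TX_MAX_RING_SIZE, and the interface is
 * stopped and restarted around the resize if it is up.
 */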

static void gfar_gpauseparam(struct net_device *dev,
			     struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);

	epause->autoneg = !!priv->pause_aneg_en;
	epause->rx_pause = !!priv->rx_pause_en;
	epause->tx_pause = !!priv->tx_pause_en;
}

static int gfar_spauseparam(struct net_device *dev,
			    struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 oldadv, newadv;

	if (!(phydev->supported & SUPPORTED_Pause) ||
	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
	     (epause->rx_pause != epause->tx_pause)))
		return -EINVAL;

	priv->rx_pause_en = priv->tx_pause_en = 0;
	if (epause->rx_pause) {
		priv->rx_pause_en = 1;

		if (epause->tx_pause) {
			priv->tx_pause_en = 1;
			/* FLOW_CTRL_RX & TX */
			newadv = ADVERTISED_Pause;
		} else  /* FLOW_CTRL_RX */
			newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	} else if (epause->tx_pause) {
		priv->tx_pause_en = 1;
		/* FLOW_CTRL_TX */
		newadv = ADVERTISED_Asym_Pause;
	} else
		newadv = 0;

	if (epause->autoneg)
		priv->pause_aneg_en = 1;
	else
		priv->pause_aneg_en = 0;

	oldadv = phydev->advertising &
		 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	if (oldadv != newadv) {
		phydev->advertising &=
			~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		phydev->advertising |= newadv;
		if (phydev->autoneg)
			/* inform link partner of our
			 * new flow ctrl settings
			 */
			return phy_start_aneg(phydev);

		if (!epause->autoneg) {
			u32 tempval;
			tempval = gfar_read(&regs->maccfg1);
			tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
			if (priv->tx_pause_en)
				tempval |= MACCFG1_TX_FLOW;
			if (priv->rx_pause_en)
				tempval |= MACCFG1_RX_FLOW;
			gfar_write(&regs->maccfg1, tempval);
		}
	}

	return 0;
}
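
/* Example (illustrative): "ethtool -A eth0 autoneg on rx on tx on" lands
 * here; when the pause advertisement changes, either autonegotiation is
 * restarted or MACCFG1 is rewritten directly, as above.
 */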

int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_RXCSUM)))
		return 0;

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	dev->features = features;

	if (dev->flags & IFF_UP) {
		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
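
/* Example (illustrative): toggling "ethtool -K eth0 rxvlan off" or
 * "ethtool -K eth0 rx off" (RX checksumming) reaches this handler;
 * changes to any other feature bits are accepted without resetting
 * the device.
 */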

static uint32_t gfar_get_msglevel(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->msg_enable = data;
}

#ifdef CONFIG_PM
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}

static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    wol->wolopts != 0)
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

	spin_lock_irqsave(&priv->bflock, flags);
	priv->wol_en = !!device_may_wakeup(&dev->dev);
	spin_unlock_irqrestore(&priv->bflock, flags);

	return 0;
}
#endif
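
/* Example (illustrative): "ethtool -s eth0 wol g" requests magic-packet
 * wake-up through gfar_set_wol(); it only succeeds when the device
 * advertises FSL_GIANFAR_DEV_HAS_MAGIC_PACKET, and any other WOL mode is
 * rejected.
 */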

static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
{
	u32 fcr = 0x0, fpr = FPR_FILER_MASK;

	if (ethflow & RXH_L2DA) {
		fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;

		fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_VLAN) {
		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_IP_SRC) {
		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & (RXH_IP_DST)) {
		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L3_PROTO) {
		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_0_1) {
		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_2_3) {
		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}
}
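
/* Note on the helper above: each hash field selected in 'ethflow' consumes
 * filer entries written downwards from cur_filer_idx, every entry pairing
 * the FPR_FILER_MASK property with a RQFCR_HASH | RQFCR_AND control word,
 * so the free filer space shrinks by one slot per enabled field (two for
 * RXH_L2DA, which matches on both halves of the MAC address).
 */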

static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
				       u64 class)
{
	unsigned int last_rule_idx = priv->cur_filer_idx;
	unsigned int cmp_rqfpr;
	unsigned int *local_rqfpr;
	unsigned int *local_rqfcr;
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;
	int ret = 1;

	local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
		break;
	default:
		netdev_err(priv->ndev,
			   "Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] ==
		     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
		    (priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		netdev_err(priv->ndev,
			   "No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* If a match was found, it marks the start of a cluster rule;
	 * if it was already programmed, we need to overwrite these rules
	 */
	for (l = i+1; l < MAX_FILER_IDX; l++) {
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
					     RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
					 priv->ftp_rqfpr[l]);
			break;
		}

		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    (priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;
	last_rule_idx = l;

	/* hash rules */
	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j+1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				 local_rqfcr[k], local_rqfpr[k]);
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}

static int gfar_set_hash_opts(struct gfar_private *priv,
			      struct ethtool_rxnfc *cmd)
{
	/* write the filer rules here */
	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
		return -EINVAL;

	return 0;
}

static int gfar_check_filer_hardware(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 i;

	/* Check if we are in FIFO mode */
	i = gfar_read(&regs->ecntrl);
	i &= ECNTRL_FIFM;
	if (i == ECNTRL_FIFM) {
		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}
	/* Or in standard mode */
	else {
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK;
		if (i == RCTRL_PRSDEP_MASK) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}

	/* Sets the properties for arbitrary filer rule
	 * to the first 4 Layer 4 Bytes
	 */
	gfar_write(&regs->rbifx, 0xC0C1C2C3);
	return 0;
}

static int gfar_comp_asc(const void *a, const void *b)
{
	return memcmp(a, b, 4);
}

static int gfar_comp_desc(const void *a, const void *b)
{
	return -memcmp(a, b, 4);
}

static void gfar_swap(void *a, void *b, int size)
{
	u32 *_a = a;
	u32 *_b = b;

	swap(_a[0], _b[0]);
	swap(_a[1], _b[1]);
	swap(_a[2], _b[2]);
	swap(_a[3], _b[3]);
}

/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	tab->fe[tab->index].prop = mask;
	tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
				   RQFCR_AND;
	tab->fe[tab->index].prop = value;
	tab->index++;
}
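
/* A minimal sketch of how the two helpers above are used when building a
 * rule by hand (assumed usage, shown only for illustration):
 *
 *	struct filer_table *tab = kzalloc(sizeof(*tab), GFP_KERNEL);
 *
 *	gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP,
 *			    RQFPR_IPV4 | RQFPR_TCP, tab);
 *
 * This leaves two entries in the cache: the mask written by gfar_set_mask(),
 * then the parse-bit match itself, both chained with RQFCR_AND.
 */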

static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
				       struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

/* For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple
 * For a don't-care mask it gives us a 0
 *
 * The check if don't care and the mask adjustment if mask=0 is done for VLAN
 * and MAC stuff on an upper level (due to missing information on this level).
 * For these guys we can discard them if they are value=0 and mask=0.
 *
 * Furthermore, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
			       struct filer_table *tab)
{
	switch (flag) {
	/* 3bit */
	case RQFCR_PID_PRI:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_PRI_MASK;
		break;
	/* 8bit */
	case RQFCR_PID_L4P:
	case RQFCR_PID_TOS:
		if (!~(mask | RQFCR_PID_L4P_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_L4P_MASK;
		break;
	/* 12bit */
	case RQFCR_PID_VID:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_VID_MASK;
		break;
	/* 16bit */
	case RQFCR_PID_DPT:
	case RQFCR_PID_SPT:
	case RQFCR_PID_ETY:
		if (!~(mask | RQFCR_PID_PORT_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_PORT_MASK;
		break;
	/* 24bit */
	case RQFCR_PID_DAH:
	case RQFCR_PID_DAL:
	case RQFCR_PID_SAH:
	case RQFCR_PID_SAL:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_MAC_MASK;
		break;
	/* for all real 32bit masks */
	default:
		if (!~mask)
			return;
		if (!mask)
			mask = ~0;
		break;
	}
	gfar_set_general_attribute(value, mask, flag, tab);
}

/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
			      struct ethtool_tcpip4_spec *mask,
			      struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(be16_to_cpu(value->pdst),
			   be16_to_cpu(mask->pdst),
			   RQFCR_PID_DPT, tab);
	gfar_set_attribute(be16_to_cpu(value->psrc),
			   be16_to_cpu(mask->psrc),
			   RQFCR_PID_SPT, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
			     struct ethtool_usrip4_spec *mask,
			     struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
	gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
			   be32_to_cpu(mask->l4_4_bytes),
			   RQFCR_PID_ARB, tab);
}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
			   struct filer_table *tab)
{
	u32 upper_temp_mask = 0;
	u32 lower_temp_mask = 0;

	/* Source address */
	if (!is_broadcast_ether_addr(mask->h_source)) {
		if (is_zero_ether_addr(mask->h_source)) {
			upper_temp_mask = 0xFFFFFFFF;
			lower_temp_mask = 0xFFFFFFFF;
		} else {
			upper_temp_mask = mask->h_source[0] << 16 |
					  mask->h_source[1] << 8  |
					  mask->h_source[2];
			lower_temp_mask = mask->h_source[3] << 16 |
					  mask->h_source[4] << 8  |
					  mask->h_source[5];
		}
		/* Upper 24bit */
		gfar_set_attribute(value->h_source[0] << 16 |
				   value->h_source[1] << 8  |
				   value->h_source[2],
				   upper_temp_mask, RQFCR_PID_SAH, tab);
		/* And the same for the lower part */
		gfar_set_attribute(value->h_source[3] << 16 |
				   value->h_source[4] << 8  |
				   value->h_source[5],
				   lower_temp_mask, RQFCR_PID_SAL, tab);
	}
	/* Destination address */
	if (!is_broadcast_ether_addr(mask->h_dest)) {
		/* Special for destination is limited broadcast */
		if ((is_broadcast_ether_addr(value->h_dest) &&
		     is_zero_ether_addr(mask->h_dest))) {
			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
		} else {
			if (is_zero_ether_addr(mask->h_dest)) {
				upper_temp_mask = 0xFFFFFFFF;
				lower_temp_mask = 0xFFFFFFFF;
			} else {
				upper_temp_mask = mask->h_dest[0] << 16 |
						  mask->h_dest[1] << 8  |
						  mask->h_dest[2];
				lower_temp_mask = mask->h_dest[3] << 16 |
						  mask->h_dest[4] << 8  |
						  mask->h_dest[5];
			}

			/* Upper 24bit */
			gfar_set_attribute(value->h_dest[0] << 16 |
					   value->h_dest[1] << 8  |
					   value->h_dest[2],
					   upper_temp_mask, RQFCR_PID_DAH, tab);
			/* And the same for the lower part */
			gfar_set_attribute(value->h_dest[3] << 16 |
					   value->h_dest[4] << 8  |
					   value->h_dest[5],
					   lower_temp_mask, RQFCR_PID_DAL, tab);
		}
	}

	gfar_set_attribute(be16_to_cpu(value->h_proto),
			   be16_to_cpu(mask->h_proto),
			   RQFCR_PID_ETY, tab);
}

static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
				 struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);

		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;

		/* Separate the fields */
		id = vlan_tci_vid(rule);
		id_mask = vlan_tci_vidm(rule);
		cfi = vlan_tci_cfi(rule);
		cfi_mask = vlan_tci_cfim(rule);
		prio = vlan_tci_prio(rule);
		prio_mask = vlan_tci_priom(rule);

		if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
			vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		} else if (cfi != VLAN_TAG_PRESENT &&
			   cfi_mask == VLAN_TAG_PRESENT) {
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				  &rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				  &rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
				  (struct ethtool_tcpip4_spec *)&rule->m_u,
				  tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				 (struct ethtool_usrip4_spec *) &rule->m_u,
				 tab);
		break;
	case ETHER_FLOW:
		if (vlan)
			gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
			       (struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is
	 * free space in hw
	 */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}
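
/* Example of a rule that ends up in gfar_convert_to_filer() (illustrative
 * command line, values arbitrary):
 *
 *   ethtool -N eth0 flow-type tcp4 src-ip 10.0.0.1 dst-port 80 action 1
 *
 * which becomes a parse-bit match (IPv4 + TCP), a source-IP match, a
 * destination-port match and a queue selection of 1 in the filer table.
 */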

/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
				    struct gfar_filer_entry src[0], s32 size)
{
	while (size > 0) {
		size--;
		dst[size].ctrl = src[size].ctrl;
		dst[size].prop = src[size].prop;
	}
}

/* Delete the contents of the filer-table between start and end
 * and collapse them
 */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
	int length;

	if (end > MAX_FILER_CACHE_IDX || end < begin)
		return -EINVAL;

	end++;
	length = end - begin;

	/* Copy */
	while (end < tab->index) {
		tab->fe[begin].ctrl = tab->fe[end].ctrl;
		tab->fe[begin++].prop = tab->fe[end++].prop;
	}
	/* Fill up with don't cares */
	while (begin < tab->index) {
		tab->fe[begin].ctrl = 0x60;
		tab->fe[begin].prop = 0xFFFFFFFF;
		begin++;
	}

	tab->index -= length;
	return 0;
}

/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
				     struct filer_table *tab)
{
	if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
	    begin > MAX_FILER_CACHE_IDX)
		return -EINVAL;

	gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
				tab->index - length + 1);

	tab->index += length;
	return 0;
}

static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
	     start++) {
		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
		    (RQFCR_AND | RQFCR_CLE))
			return start;
	}
	return -1;
}

static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
	for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
	     start++) {
		if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
		    (RQFCR_CLE))
			return start;
	}
	return -1;
}
1339
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001340/* Uses hardwares clustering option to reduce
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001341 * the number of filer table entries
1342 */
1343static void gfar_cluster_filer(struct filer_table *tab)
1344{
1345 s32 i = -1, j, iend, jend;
1346
1347 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1348 j = i;
1349 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001350 /* The cluster entries self and the previous one
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001351 * (a mask) must be identical!
1352 */
1353 if (tab->fe[i].ctrl != tab->fe[j].ctrl)
1354 break;
1355 if (tab->fe[i].prop != tab->fe[j].prop)
1356 break;
1357 if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
1358 break;
1359 if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
1360 break;
1361 iend = gfar_get_next_cluster_end(i, tab);
1362 jend = gfar_get_next_cluster_end(j, tab);
1363 if (jend == -1 || iend == -1)
1364 break;
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001365
1366 /* First we make some free space, where our cluster
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001367 * element should be. Then we copy it there and finally
1368 * delete in from its old location.
1369 */
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001370 if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1371 -EINVAL)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001372 break;
1373
1374 gfar_copy_filer_entries(&(tab->fe[iend + 1]),
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001375 &(tab->fe[jend + 1]), jend - j);
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001376
1377 if (gfar_trim_filer_entries(jend - 1,
Jan Ceuleerscbfc60712012-06-05 03:42:15 +00001378 jend + (jend - j),
1379 tab) == -EINVAL)
Sebastian Poehn4aa3a712011-06-20 13:57:59 -07001380 return;
1381
1382 /* Mask out cluster bit */
1383 tab->fe[iend].ctrl &= ~(RQFCR_CLE);
1384 }
1385 }
1386}
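
/* Summary of the clustering pass (added for clarity, following the comments
 * above): for every pair of cluster starts i and j whose (mask, rule) entry
 * pairs are identical, the body of cluster j is copied into free space made
 * at the end of cluster i, the old copy of cluster j is trimmed away, and
 * the CLE bit on the former end of cluster i is cleared so that both bodies
 * now hang off a single cluster start.
 */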

/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
			   struct gfar_filer_entry *a2,
			   struct gfar_filer_entry *b1,
			   struct gfar_filer_entry *b2, u32 mask)
{
	u32 temp[4];

	temp[0] = a1->ctrl & mask;
	temp[1] = a2->ctrl & mask;
	temp[2] = b1->ctrl & mask;
	temp[3] = b2->ctrl & mask;

	a1->ctrl &= ~mask;
	a2->ctrl &= ~mask;
	b1->ctrl &= ~mask;
	b2->ctrl &= ~mask;

	a1->ctrl |= temp[1];
	a2->ctrl |= temp[0];
	b1->ctrl |= temp[3];
	b2->ctrl |= temp[2];
}
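
/* Usage note (added for clarity): gfar_sort_mask_table() below calls this
 * with mask = RQFCR_QUEUE | RQFCR_CLE | RQFCR_RJE | RQFCR_AND so that, after
 * a block has been reordered, the queue/cluster/reject/AND flags that were
 * sitting on the first and last entries of the block stay on the (new) first
 * and last entries.
 */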

/* Generate a list consisting of mask values with their start and
 * end of validity, and a block number indicating parts that belong
 * together (glued by ANDs), in mask_table
 */
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
				    struct filer_table *tab)
{
	u32 i, and_index = 0, block_index = 1;

	for (i = 0; i < tab->index; i++) {

		/* Low nibble of control = 0 sets a mask */
		if (!(tab->fe[i].ctrl & 0xF)) {
			mask_table[and_index].mask = tab->fe[i].prop;
			mask_table[and_index].start = i;
			mask_table[and_index].block = block_index;
			if (and_index >= 1)
				mask_table[and_index - 1].end = i - 1;
			and_index++;
		}
		/* Cluster starts and ends are put into separate blocks
		 * because they must hold their position
		 */
		if (tab->fe[i].ctrl & RQFCR_CLE)
			block_index++;
		/* A cleared AND bit indicates the end of a dependent block */
		if (!(tab->fe[i].ctrl & RQFCR_AND))
			block_index++;
	}

	mask_table[and_index - 1].end = i - 1;

	return and_index;
}
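
/* Example of the resulting layout (illustrative only): for a table holding
 * mask A, rule, mask B, rule, rule - all glued together by AND except the
 * last rule - this produces two list entries:
 *   { mask = A, start = 0, end = 1, block = 1 }
 *   { mask = B, start = 2, end = 4, block = 1 }
 * A cleared AND bit or a CLE bit in between would have bumped the block
 * number, putting the second mask into its own block.
 */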

/* Sorts the entries of mask_table by the values of the masks.
 * Important: The 0xFF80 flags of the first and last entry of a
 * block must hold their position (which queue, CLusterEnable, ReJEct,
 * AND)
 */
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
				 struct filer_table *temp_table, u32 and_index)
{
	/* Pointer to the compare function (_asc or _desc) */
	int (*gfar_comp)(const void *, const void *);

	u32 i, size = 0, start = 0, prev = 1;
	u32 old_first, old_last, new_first, new_last;

	gfar_comp = &gfar_comp_desc;

	for (i = 0; i < and_index; i++) {
		if (prev != mask_table[i].block) {
			old_first = mask_table[start].start + 1;
			old_last = mask_table[i - 1].end;
			sort(mask_table + start, size,
			     sizeof(struct gfar_mask_entry),
			     gfar_comp, &gfar_swap);

			/* Toggle the sort order for every block. This makes
			 * the optimization more effective!
			 */
			if (gfar_comp == gfar_comp_desc)
				gfar_comp = &gfar_comp_asc;
			else
				gfar_comp = &gfar_comp_desc;

			new_first = mask_table[start].start + 1;
			new_last = mask_table[i - 1].end;

			gfar_swap_bits(&temp_table->fe[new_first],
				       &temp_table->fe[old_first],
				       &temp_table->fe[new_last],
				       &temp_table->fe[old_last],
				       RQFCR_QUEUE | RQFCR_CLE |
				       RQFCR_RJE | RQFCR_AND);

			start = i;
			size = 0;
		}
		size++;
		prev = mask_table[i].block;
	}
}
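
/* Note (added for clarity): the gfar_comp_asc(), gfar_comp_desc() and
 * gfar_swap() helpers used above are defined earlier in this file, and
 * sort() is the kernel's generic sort from <linux/sort.h>, which is why a
 * custom swap callback can be passed in.
 */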

/* Reduces the number of masks needed in the filer table to save entries.
 * This is done by sorting the masks of a dependent block. A dependent block
 * is identified by gluing ANDs or CLE. The sorting order toggles after every
 * block. Of course entries in scope of a mask must change their location
 * with it.
 */
static int gfar_optimize_filer_masks(struct filer_table *tab)
{
	struct filer_table *temp_table;
	struct gfar_mask_entry *mask_table;

	u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
	s32 ret = 0;

	/* We need a copy of the filer table because
	 * we want to change its order
	 */
	temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
	if (temp_table == NULL)
		return -ENOMEM;

	mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
			     sizeof(struct gfar_mask_entry), GFP_KERNEL);

	if (mask_table == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	and_index = gfar_generate_mask_table(mask_table, tab);

	gfar_sort_mask_table(mask_table, temp_table, and_index);

	/* Now we can copy the data from our duplicated filer table to
	 * the real one in the order the mask table says
	 */
	for (i = 0; i < and_index; i++) {
		size = mask_table[i].end - mask_table[i].start + 1;
		gfar_copy_filer_entries(&(tab->fe[j]),
					&(temp_table->fe[mask_table[i].start]),
					size);
		j += size;
	}

	/* And finally we just have to check for duplicated masks and drop
	 * the second ones
	 */
	for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			previous_mask = i++;
			break;
		}
	}
	for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
		if (tab->fe[i].ctrl == 0x80) {
			if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
				/* Two identical ones found!
				 * So drop the second one!
				 */
				gfar_trim_filer_entries(i, i, tab);
			} else
				/* Not identical! */
				previous_mask = i;
		}
	}

	kfree(mask_table);
end:
	kfree(temp_table);
	return ret;
}
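
/* Pass structure of the mask optimizer (summary added for clarity):
 *   1. gfar_generate_mask_table() records every mask, its range of rule
 *      entries and its dependency block.
 *   2. gfar_sort_mask_table() sorts the mask list per block and fixes up the
 *      boundary flags in the scratch copy of the table.
 *   3. The copy loop above rebuilds the live table in the sorted order, and
 *      the final two loops drop masks that became identical to their
 *      predecessor.
 */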

/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
				  struct filer_table *tab)
{
	u32 i = 0;

	if (tab->index > MAX_FILER_IDX - 1)
		return -EBUSY;

	/* Fill regular entries */
	for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
	     i++)
		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
	/* Fill the rest with fall-throughs */
	for (; i < MAX_FILER_IDX - 1; i++)
		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
	/* Last entry must be default accept
	 * because that's what people expect
	 */
	gfar_write_filer(priv, i, 0x20, 0x0);

	return 0;
}
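
/* Note (added for clarity): gfar_write_filer() is defined elsewhere in the
 * driver and is expected to program one filer entry (an RQFCR/RQFPR pair)
 * at index 'i'. Unused entries are written as never-matching fall-throughs
 * so that only the final catch-all entry decides the default behaviour.
 */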

static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
				 struct gfar_private *priv)
{
	if (flow->flow_type & FLOW_EXT) {
		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
			netdev_warn(priv->ndev,
				    "User-specific data not supported!\n");
		if (~flow->m_ext.vlan_etype)
			netdev_warn(priv->ndev,
				    "VLAN-etype not supported!\n");
	}
	if (flow->flow_type == IP_USER_FLOW)
		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
			netdev_warn(priv->ndev,
				    "IP-Version differing from IPv4 not supported!\n");

	return 0;
}

static int gfar_process_filer_changes(struct gfar_private *priv)
{
	struct ethtool_flow_spec_container *j;
	struct filer_table *tab;
	s32 i = 0;
	s32 ret = 0;

	/* kzalloc() so that tab->index is set to zero, too */
	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return -ENOMEM;

	/* Now convert the existing filer data from flow_spec into
	 * the filer table's binary format
	 */
	list_for_each_entry(j, &priv->rx_list.list, list) {
		ret = gfar_convert_to_filer(&j->fs, tab);
		if (ret == -EBUSY) {
			netdev_err(priv->ndev,
				   "Rule not added: No free space!\n");
			goto end;
		}
		if (ret == -1) {
			netdev_err(priv->ndev,
				   "Rule not added: Unsupported Flow-type!\n");
			goto end;
		}
	}

	i = tab->index;

	/* Optimizations to save entries */
	gfar_cluster_filer(tab);
	gfar_optimize_filer_masks(tab);

	pr_debug("\tSummary:\n"
		 "\tData on hardware: %d\n"
		 "\tCompression rate: %d%%\n",
		 tab->index, 100 - (100 * tab->index) / i);

	/* Write everything to hardware */
	ret = gfar_write_filer_table(priv, tab);
	if (ret == -EBUSY) {
		netdev_err(priv->ndev, "Rule not added: No free space!\n");
		goto end;
	}

end:
	kfree(tab);
	return ret;
}

static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	u32 i = 0;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xFF;

	flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
	flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
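
/* Note (added for clarity): every rule goes through this bit-flip once when
 * it is inserted (gfar_add_cls()) and once more when it is reported back to
 * userspace (gfar_get_cls()), so the transformation is its own inverse -
 * presumably because the filer code and the ethtool rx-flow-spec ABI expect
 * opposite mask polarities.
 */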

static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right @location */
	if (list_empty(&priv->rx_list.list)) {
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	priv->rx_list.count++;
	return ret;

clean_list:
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}

static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
			gfar_invert_masks(&cmd->fs);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls_all(struct gfar_private *priv,
			    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 i = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (i == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[i] = comp->fs.location;
		i++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = i;

	return 0;
}

static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	if (test_bit(GFAR_RESETTING, &priv->state))
		return -EBUSY;

	mutex_lock(&priv->rx_queue_access);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = gfar_set_hash_opts(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
		     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
		    cmd->fs.location >= MAX_FILER_IDX) {
			ret = -EINVAL;
			break;
		}
		ret = gfar_add_cls(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gfar_del_cls(priv, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->rx_queue_access);

	return ret;
}
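
/* Userspace view (illustrative, not part of the original source): the
 * ETHTOOL_SRXCLSRLINS / ETHTOOL_SRXCLSRLDEL paths above are what the
 * ethtool utility's RX flow classification options end up exercising,
 * roughly:
 *
 *   ethtool -N eth0 flow-type udp4 dst-port 319 action 1 loc 5
 *   ethtool -N eth0 delete 5
 *
 * where "action 1" selects RX queue 1 (carried in ring_cookie) and "loc 5"
 * is the rule's position in the list managed by gfar_add_cls()/gfar_del_cls().
 * The device name, port and indices are examples only.
 */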

static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			u32 *rule_locs)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->num_rx_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = priv->rx_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gfar_get_cls(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gfar_get_cls_all(priv, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

int gfar_phc_index = -1;
EXPORT_SYMBOL(gfar_phc_index);

static int gfar_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = gfar_phc_index;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

const struct ethtool_ops gfar_ethtool_ops = {
	.get_settings = gfar_gsettings,
	.set_settings = gfar_ssettings,
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_pauseparam = gfar_gpauseparam,
	.set_pauseparam = gfar_spauseparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
#endif
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
	.get_ts_info = gfar_get_ts_info,
};