blob: 20b83f11004accc2cb24267300bce540bc8ff39a [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
Jeff Garzik03a8c662006-06-27 07:57:22 -040028 [link no longer provides useful info -jgarzik]
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30*/
31
Joe Perchesdf4511f2011-04-16 14:15:25 +000032#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#define DRV_NAME "via-rhine"
Roger Luethi207070f2013-09-21 14:24:11 +020035#define DRV_VERSION "1.5.1"
Roger Luethi38f49e82010-12-06 00:59:40 +000036#define DRV_RELDATE "2010-10-09"
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Rusty Russelleb939922011-12-19 14:08:01 +000038#include <linux/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
40/* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
Francois Romieufc3e0f82012-01-07 22:39:37 +010042static int debug = 0;
43#define RHINE_MSG_DEFAULT \
44 (0x0000)
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
46/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47 Setting to > 1518 effectively disables this feature. */
Joe Perches8e95a202009-12-03 07:58:21 +000048#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 defined(CONFIG_SPARC) || defined(__ia64__) || \
50 defined(__sh__) || defined(__mips__)
Dustin Marquessb47157f2007-08-10 14:05:15 -070051static int rx_copybreak = 1518;
52#else
Linus Torvalds1da177e2005-04-16 15:20:36 -070053static int rx_copybreak;
Dustin Marquessb47157f2007-08-10 14:05:15 -070054#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Roger Luethib933b4d2006-08-14 23:00:21 -070056/* Work-around for broken BIOSes: they are unable to get the chip back out of
57 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
Rusty Russelleb939922011-12-19 14:08:01 +000058static bool avoid_D3;
Roger Luethib933b4d2006-08-14 23:00:21 -070059
Linus Torvalds1da177e2005-04-16 15:20:36 -070060/*
61 * In case you are looking for 'options[]' or 'full_duplex[]', they
62 * are gone. Use ethtool(8) instead.
63 */
64
65/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66 The Rhine has a 64 element 8390-like hash table. */
67static const int multicast_filter_limit = 32;
68
69
70/* Operational parameters that are set at compile time. */
71
72/* Keep the ring sizes a power of two for compile efficiency.
73 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74 Making the Tx ring too large decreases the effectiveness of channel
75 bonding and packet priority.
76 There are no ill effects from too-large receive rings. */
77#define TX_RING_SIZE 16
78#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
Roger Luethi633949a2006-08-14 23:00:17 -070079#define RX_RING_SIZE 64
Linus Torvalds1da177e2005-04-16 15:20:36 -070080
81/* Operational parameters that usually are not changed. */
82
83/* Time in jiffies before concluding the transmitter is hung. */
84#define TX_TIMEOUT (2*HZ)
85
86#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
87
88#include <linux/module.h>
89#include <linux/moduleparam.h>
90#include <linux/kernel.h>
91#include <linux/string.h>
92#include <linux/timer.h>
93#include <linux/errno.h>
94#include <linux/ioport.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070095#include <linux/interrupt.h>
96#include <linux/pci.h>
Domen Puncer1e7f0bd2005-06-26 18:22:14 -040097#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070098#include <linux/netdevice.h>
99#include <linux/etherdevice.h>
100#include <linux/skbuff.h>
101#include <linux/init.h>
102#include <linux/delay.h>
103#include <linux/mii.h>
104#include <linux/ethtool.h>
105#include <linux/crc32.h>
Roger Luethi38f49e82010-12-06 00:59:40 +0000106#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107#include <linux/bitops.h>
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800108#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109#include <asm/processor.h> /* Processor type for cache alignment. */
110#include <asm/io.h>
111#include <asm/irq.h>
112#include <asm/uaccess.h>
Roger Luethie84df482007-03-06 19:57:37 +0100113#include <linux/dmi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114
115/* These identify the driver base version and may not be removed. */
Bill Pemberton76e239e2012-12-03 09:23:48 -0500116static const char version[] =
Joe Perchesdf4511f2011-04-16 14:15:25 +0000117 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118
119/* This driver was written to use PCI memory space. Some early versions
120 of the Rhine may only work correctly with I/O space accesses. */
121#ifdef CONFIG_VIA_RHINE_MMIO
122#define USE_MMIO
123#else
124#endif
125
126MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
127MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128MODULE_LICENSE("GPL");
129
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130module_param(debug, int, 0);
131module_param(rx_copybreak, int, 0);
Roger Luethib933b4d2006-08-14 23:00:21 -0700132module_param(avoid_D3, bool, 0);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100133MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
Roger Luethib933b4d2006-08-14 23:00:21 -0700135MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136
Roger Luethi38f49e82010-12-06 00:59:40 +0000137#define MCAM_SIZE 32
138#define VCAM_SIZE 32
139
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140/*
141 Theory of Operation
142
143I. Board Compatibility
144
145This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
146controller.
147
148II. Board-specific settings
149
150Boards with this chip are functional only in a bus-master PCI slot.
151
152Many operational settings are loaded from the EEPROM to the Config word at
153offset 0x78. For most of these settings, this driver assumes that they are
154correct.
155If this driver is compiled to use PCI memory space operations the EEPROM
156must be configured to enable memory ops.
157
158III. Driver operation
159
160IIIa. Ring buffers
161
162This driver uses two statically allocated fixed-size descriptor lists
163formed into rings by a branch from the final descriptor to the beginning of
164the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
165
166IIIb/c. Transmit/Receive Structure
167
168This driver attempts to use a zero-copy receive and transmit scheme.
169
170Alas, all data buffers are required to start on a 32 bit boundary, so
171the driver must often copy transmit packets into bounce buffers.
172
173The driver allocates full frame size skbuffs for the Rx ring buffers at
174open() time and passes the skb->data field to the chip as receive data
175buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176a fresh skbuff is allocated and the frame is copied to the new skbuff.
177When the incoming frame is larger, the skbuff is passed directly up the
178protocol stack. Buffers consumed this way are replaced by newly allocated
179skbuffs in the last phase of rhine_rx().
180
181The RX_COPYBREAK value is chosen to trade-off the memory wasted by
182using a full-sized skbuff for small frames vs. the copying costs of larger
183frames. New boards are typically used in generously configured machines
184and the underfilled buffers have negligible impact compared to the benefit of
185a single allocation size, so the default value of zero results in never
186copying packets. When copying is done, the cost is usually mitigated by using
187a combined copy/checksum routine. Copying also preloads the cache, which is
188most useful with small frames.
189
190Since the VIA chips are only able to transfer data to buffers on 32 bit
191boundaries, the IP header at offset 14 in an ethernet frame isn't
192longword aligned for further processing. Copying these unaligned buffers
193has the beneficial effect of 16-byte aligning the IP header.
194
195IIId. Synchronization
196
197The driver runs as two independent, single-threaded flows of control. One
198is the send-packet routine, which enforces single-threaded use by the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800199netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
200which is single threaded by the hardware and interrupt handling software.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201
202The send packet thread has partial control over the Tx ring. It locks the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800203netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
204the ring is not available it stops the transmit queue by
205calling netif_stop_queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206
207The interrupt handler has exclusive control over the Rx ring and records stats
208from the Tx ring. After reaping the stats, it marks the Tx queue entry as
209empty by incrementing the dirty_tx mark. If at least half of the entries in
210the Rx ring are available the transmit queue is woken up if it was stopped.
211
212IV. Notes
213
214IVb. References
215
216Preliminary VT86C100A manual from http://www.via.com.tw/
217http://www.scyld.com/expert/100mbps.html
218http://www.scyld.com/expert/NWay.html
219ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
220ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
221
222
223IVc. Errata
224
225The VT86C100A manual is not reliable information.
226The 3043 chip does not handle unaligned transmit or receive buffers, resulting
227in significant performance degradation for bounce buffer copies on transmit
228and unaligned IP headers on receive.
229The chip does not pad to minimum transmit length.
230
231*/
232
233
234/* This table drives the PCI probe routines. It's mostly boilerplate in all
235 of the drivers, and will likely be provided by some future kernel.
236 Note the matching code -- the first table entry matches all 56** cards but
237 second only the 1234 card.
238*/
239
240enum rhine_revs {
241 VT86C100A = 0x00,
242 VTunknown0 = 0x20,
243 VT6102 = 0x40,
244 VT8231 = 0x50, /* Integrated MAC */
245 VT8233 = 0x60, /* Integrated MAC */
246 VT8235 = 0x74, /* Integrated MAC */
247 VT8237 = 0x78, /* Integrated MAC */
248 VTunknown1 = 0x7C,
249 VT6105 = 0x80,
250 VT6105_B0 = 0x83,
251 VT6105L = 0x8A,
252 VT6107 = 0x8C,
253 VTunknown2 = 0x8E,
254 VT6105M = 0x90, /* Management adapter */
255};
256
257enum rhine_quirks {
258 rqWOL = 0x0001, /* Wake-On-LAN support */
259 rqForceReset = 0x0002,
260 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
261 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
262 rqRhineI = 0x0100, /* See comment below */
263};
264/*
265 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
266 * MMIO as well as for the collision counter and the Tx FIFO underflow
267 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
268 */
269
270/* Beware of PCI posted writes */
271#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
272
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000273static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
Jeff Garzik46009c82006-06-27 09:12:38 -0400274 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
275 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
276 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
277 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278 { } /* terminate list */
279};
280MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
281
282
283/* Offsets to the device registers. */
284enum register_offsets {
285 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
Roger Luethi38f49e82010-12-06 00:59:40 +0000286 ChipCmd1=0x09, TQWake=0x0A,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287 IntrStatus=0x0C, IntrEnable=0x0E,
288 MulticastFilter0=0x10, MulticastFilter1=0x14,
289 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
Roger Luethi38f49e82010-12-06 00:59:40 +0000290 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700291 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294 StickyHW=0x83, IntrStatus2=0x84,
Roger Luethi38f49e82010-12-06 00:59:40 +0000295 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297 WOLcrClr1=0xA6, WOLcgClr=0xA7,
298 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
299};
300
301/* Bits in ConfigD */
302enum backoff_bits {
303 BackOptional=0x01, BackModify=0x02,
304 BackCaptureEffect=0x04, BackRandom=0x08
305};
306
Roger Luethi38f49e82010-12-06 00:59:40 +0000307/* Bits in the TxConfig (TCR) register */
308enum tcr_bits {
309 TCR_PQEN=0x01,
310 TCR_LB0=0x02, /* loopback[0] */
311 TCR_LB1=0x04, /* loopback[1] */
312 TCR_OFSET=0x08,
313 TCR_RTGOPT=0x10,
314 TCR_RTFT0=0x20,
315 TCR_RTFT1=0x40,
316 TCR_RTSF=0x80,
317};
318
319/* Bits in the CamCon (CAMC) register */
320enum camcon_bits {
321 CAMC_CAMEN=0x01,
322 CAMC_VCAMSL=0x02,
323 CAMC_CAMWR=0x04,
324 CAMC_CAMRD=0x08,
325};
326
327/* Bits in the PCIBusConfig1 (BCR1) register */
328enum bcr1_bits {
329 BCR1_POT0=0x01,
330 BCR1_POT1=0x02,
331 BCR1_POT2=0x04,
332 BCR1_CTFT0=0x08,
333 BCR1_CTFT1=0x10,
334 BCR1_CTSF=0x20,
335 BCR1_TXQNOBK=0x40, /* for VT6105 */
336 BCR1_VIDFR=0x80, /* for VT6105 */
337 BCR1_MED0=0x40, /* for VT6102 */
338 BCR1_MED1=0x80, /* for VT6102 */
339};
340
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341#ifdef USE_MMIO
342/* Registers we check that mmio and reg are the same. */
343static const int mmio_verify_registers[] = {
344 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
345 0
346};
347#endif
348
349/* Bits in the interrupt status/mask registers. */
350enum intr_status_bits {
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100351 IntrRxDone = 0x0001,
352 IntrTxDone = 0x0002,
353 IntrRxErr = 0x0004,
354 IntrTxError = 0x0008,
355 IntrRxEmpty = 0x0020,
356 IntrPCIErr = 0x0040,
357 IntrStatsMax = 0x0080,
358 IntrRxEarly = 0x0100,
359 IntrTxUnderrun = 0x0210,
360 IntrRxOverflow = 0x0400,
361 IntrRxDropped = 0x0800,
362 IntrRxNoBuf = 0x1000,
363 IntrTxAborted = 0x2000,
364 IntrLinkChange = 0x4000,
365 IntrRxWakeUp = 0x8000,
366 IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */
367 IntrNormalSummary = IntrRxDone | IntrTxDone,
368 IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
369 IntrTxUnderrun,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700370};
371
372/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
373enum wol_bits {
374 WOLucast = 0x10,
375 WOLmagic = 0x20,
376 WOLbmcast = 0x30,
377 WOLlnkon = 0x40,
378 WOLlnkoff = 0x80,
379};
380
381/* The Rx and Tx buffer descriptors. */
382struct rx_desc {
Al Viro53c03f52007-08-23 02:33:30 -0400383 __le32 rx_status;
384 __le32 desc_length; /* Chain flag, Buffer/frame length */
385 __le32 addr;
386 __le32 next_desc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387};
388struct tx_desc {
Al Viro53c03f52007-08-23 02:33:30 -0400389 __le32 tx_status;
390 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
391 __le32 addr;
392 __le32 next_desc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393};
394
395/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
396#define TXDESC 0x00e08000
397
398enum rx_status_bits {
399 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
400};
401
402/* Bits in *_desc.*_status */
403enum desc_status_bits {
404 DescOwn=0x80000000
405};
406
Roger Luethi38f49e82010-12-06 00:59:40 +0000407/* Bits in *_desc.*_length */
408enum desc_length_bits {
409 DescTag=0x00010000
410};
411
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412/* Bits in ChipCmd. */
413enum chip_cmd_bits {
414 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
418};
419
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000420struct rhine_stats {
421 u64 packets;
422 u64 bytes;
423 struct u64_stats_sync syncp;
424};
425
/* Per-device driver state, stored in netdev_priv(dev). */
struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;		/* bus address of rx_ring */
	dma_addr_t tx_ring_dma;		/* bus address of tx_ring */

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int revision;			/* chip revision, distinguishes Rhine I/II/III */
	int irq;
	long pioaddr;			/* I/O port base (used even in MMIO mode for EEPROM reload) */
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;		/* serializes Tx path vs. interrupt handler */
	struct mutex task_lock;		/* serializes the two work items below */
	bool task_enable;		/* false once teardown starts; gates task scheduling */
	struct work_struct slow_event_task;	/* handles RHINE_EVENT_SLOW (PCI err, link change) */
	struct work_struct reset_task;

	u32 msg_enable;			/* netif_msg_* log level bitmap */

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;			/* rq* flags from enum rhine_quirks */
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;			/* configured Wake-on-LAN options */

	u8 tx_thresh, rx_thresh;	/* FIFO thresholds written to TxConfig/RxConfig */

	struct mii_if_info mii_if;
	void __iomem *base;		/* mapped register base (PIO or MMIO) */
};
477
Roger Luethi38f49e82010-12-06 00:59:40 +0000478#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
479#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
480#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
481
482#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
483#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
484#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
485
486#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
487#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
488#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
489
490#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
491#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
492#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
493
494
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495static int mdio_read(struct net_device *dev, int phy_id, int location);
496static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
497static int rhine_open(struct net_device *dev);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800498static void rhine_reset_task(struct work_struct *work);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100499static void rhine_slow_event_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500static void rhine_tx_timeout(struct net_device *dev);
Stephen Hemminger613573252009-08-31 19:50:58 +0000501static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
502 struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100503static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700504static void rhine_tx(struct net_device *dev);
Roger Luethi633949a2006-08-14 23:00:17 -0700505static int rhine_rx(struct net_device *dev, int limit);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506static void rhine_set_rx_mode(struct net_device *dev);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000507static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
508 struct rtnl_link_stats64 *stats);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Jeff Garzik7282d492006-09-13 14:30:00 -0400510static const struct ethtool_ops netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511static int rhine_close(struct net_device *dev);
Patrick McHardy80d5c362013-04-19 02:04:28 +0000512static int rhine_vlan_rx_add_vid(struct net_device *dev,
513 __be16 proto, u16 vid);
514static int rhine_vlan_rx_kill_vid(struct net_device *dev,
515 __be16 proto, u16 vid);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100516static void rhine_restart_tx(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000518static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
Francois Romieua384a332012-01-07 22:19:36 +0100519{
520 void __iomem *ioaddr = rp->base;
521 int i;
522
523 for (i = 0; i < 1024; i++) {
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000524 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
525
526 if (low ^ has_mask_bits)
Francois Romieua384a332012-01-07 22:19:36 +0100527 break;
528 udelay(10);
529 }
530 if (i > 64) {
Francois Romieufc3e0f82012-01-07 22:39:37 +0100531 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000532 "count: %04d\n", low ? "low" : "high", reg, mask, i);
Francois Romieua384a332012-01-07 22:19:36 +0100533 }
534}
535
/* Busy-wait until all @mask bits in register @reg read back as set. */
static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}
540
/* Busy-wait until all @mask bits in register @reg read back as cleared. */
static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545
Francois Romieua20a28b2011-12-30 14:53:58 +0100546static u32 rhine_get_events(struct rhine_private *rp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 void __iomem *ioaddr = rp->base;
549 u32 intr_status;
550
551 intr_status = ioread16(ioaddr + IntrStatus);
552 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
553 if (rp->quirks & rqStatusWBRace)
554 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
555 return intr_status;
556}
557
Francois Romieua20a28b2011-12-30 14:53:58 +0100558static void rhine_ack_events(struct rhine_private *rp, u32 mask)
559{
560 void __iomem *ioaddr = rp->base;
561
562 if (rp->quirks & rqStatusWBRace)
563 iowrite8(mask >> 16, ioaddr + IntrStatus2);
564 iowrite16(mask, ioaddr + IntrStatus);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100565 mmiowb();
Francois Romieua20a28b2011-12-30 14:53:58 +0100566}
567
/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	/* Only chips with Wake-on-LAN support have these registers. */
	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits (read before clearing below) */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		/* A non-zero saved status means a WOL event woke the system. */
		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}
627
628static void rhine_chip_reset(struct net_device *dev)
629{
630 struct rhine_private *rp = netdev_priv(dev);
631 void __iomem *ioaddr = rp->base;
Francois Romieufc3e0f82012-01-07 22:39:37 +0100632 u8 cmd1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700633
634 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
635 IOSYNC;
636
637 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
Joe Perchesdf4511f2011-04-16 14:15:25 +0000638 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639
640 /* Force reset */
641 if (rp->quirks & rqForceReset)
642 iowrite8(0x40, ioaddr + MiscCmd);
643
644 /* Reset can take somewhat longer (rare) */
Francois Romieua384a332012-01-07 22:19:36 +0100645 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646 }
647
Francois Romieufc3e0f82012-01-07 22:39:37 +0100648 cmd1 = ioread8(ioaddr + ChipCmd1);
649 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
650 "failed" : "succeeded");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651}
652
653#ifdef USE_MMIO
654static void enable_mmio(long pioaddr, u32 quirks)
655{
656 int n;
657 if (quirks & rqRhineI) {
658 /* More recent docs say that this bit is reserved ... */
659 n = inb(pioaddr + ConfigA) | 0x20;
660 outb(n, pioaddr + ConfigA);
661 } else {
662 n = inb(pioaddr + ConfigD) | 0x80;
663 outb(n, pioaddr + ConfigD);
664 }
665}
666#endif
667
/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	/* Trigger the auto-load; the chip clears the bit when finished. */
	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	/* Reload normally finishes quickly; flag unusually long waits. */
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}
700
701#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point: run the interrupt handler with the device IRQ
 * masked so it cannot re-enter (used by netconsole and similar).
 */
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
711#endif
712
Francois Romieu269f3112011-12-30 14:43:54 +0100713static void rhine_kick_tx_threshold(struct rhine_private *rp)
714{
715 if (rp->tx_thresh < 0xe0) {
716 void __iomem *ioaddr = rp->base;
717
718 rp->tx_thresh += 0x20;
719 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
720 }
721}
722
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100723static void rhine_tx_err(struct rhine_private *rp, u32 status)
724{
725 struct net_device *dev = rp->dev;
726
727 if (status & IntrTxAborted) {
Francois Romieufc3e0f82012-01-07 22:39:37 +0100728 netif_info(rp, tx_err, dev,
729 "Abort %08x, frame dropped\n", status);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100730 }
731
732 if (status & IntrTxUnderrun) {
733 rhine_kick_tx_threshold(rp);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100734 netif_info(rp, tx_err ,dev, "Transmitter underrun, "
735 "Tx threshold now %02x\n", rp->tx_thresh);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100736 }
737
Francois Romieufc3e0f82012-01-07 22:39:37 +0100738 if (status & IntrTxDescRace)
739 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100740
741 if ((status & IntrTxError) &&
742 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
743 rhine_kick_tx_threshold(rp);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100744 netif_info(rp, tx_err, dev, "Unspecified error. "
745 "Tx threshold now %02x\n", rp->tx_thresh);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100746 }
747
748 rhine_restart_tx(dev);
749}
750
/*
 * Fold the chip's hardware tally counters for Rx CRC errors and missed
 * frames into the netdev statistics, then reset the counters.
 * Called under rp->lock (see rhine_napipoll).
 *
 * NOTE(review): the function name carries a historical typo ("errord");
 * left unchanged because it is called elsewhere in this file.
 */
static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}
769
/* Interrupt sources serviced by the NAPI handler: Rx side. */
#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow | \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

/* Tx error causes that get special handling (see rhine_tx_err). */
#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
/* All Tx interrupt sources serviced by the NAPI handler. */
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

/* Everything the NAPI poll handles, including tally-counter overflow. */
#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
/* Rare events deferred to the slow_event_task work item. */
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
789
/*
 * NAPI poll handler: read and acknowledge pending chip events, service
 * Rx/Tx within @budget, and re-enable interrupts only once the budget
 * was not exhausted.  Slow events stay masked until their work item ran.
 */
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	/* Slow events are acknowledged by the slow_event_task, not here. */
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before Tx engine turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		/* Adjust thresholds / restart Tx only after scavenging. */
		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	if (status & IntrStatsMax) {
		/* Tally counters overflowed: fold them into netdev stats. */
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errord(rp);
		spin_unlock(&rp->lock);
	}

	if (status & RHINE_EVENT_SLOW) {
		/* Keep slow events masked until slow_event_task has run. */
		enable_mask &= ~RHINE_EVENT_SLOW;
		schedule_work(&rp->slow_event_task);
	}

	if (work_done < budget) {
		napi_complete(napi);
		iowrite16(enable_mask, ioaddr + IntrEnable);
		mmiowb();
	}
	return work_done;
}
Roger Luethi633949a2006-08-14 23:00:17 -0700837
Bill Pemberton76e239e2012-12-03 09:23:48 -0500838static void rhine_hw_init(struct net_device *dev, long pioaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700839{
840 struct rhine_private *rp = netdev_priv(dev);
841
842 /* Reset the chip to erase previous misconfiguration. */
843 rhine_chip_reset(dev);
844
845 /* Rhine-I needs extra time to recuperate before EEPROM reload */
846 if (rp->quirks & rqRhineI)
847 msleep(5);
848
849 /* Reload EEPROM controlled bytes cleared by soft reset */
850 rhine_reload_eeprom(pioaddr, dev);
851}
852
/* net_device method table shared by all Rhine variants. */
static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats64	 = rhine_get_stats64,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};
870
/*
 * PCI probe: identify the Rhine generation from the PCI revision,
 * map the register BAR (PIO or MMIO depending on USE_MMIO), read the
 * EEPROM-loaded station address, wire up netdev ops/features and
 * register the net_device.  Errors unwind through the goto ladder.
 */
static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	struct device *hwdev = &pdev->dev;
	int revision = pdev->revision;
	int i, rc;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	/* Map PCI revision ranges to chip generation, quirks, BAR size. */
	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	} else if (revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		} else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
		goto err_out_pci_disable;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
		goto err_out_pci_disable;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out_pci_disable;
	}
	SET_NETDEV_DEV(dev, hwdev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->revision = revision;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(hwdev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			dev_name(hwdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			dev_err(hwdev,
				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
				reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	rp->base = ioaddr;
	rp->irq = pdev->irq;

	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	/* Station address was reloaded from EEPROM by rhine_hw_init(). */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	spin_lock_init(&rp->lock);
	mutex_init(&rp->task_lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);
	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* Hardware VLAN offload exists only on VT6105M and newer. */
	if (rp->revision >= VT6105M)
		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
		    name,
#ifdef USE_MMIO
		    memaddr,
#else
		    (long)ioaddr,
#endif
		    dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

	{
		/* Un-isolate the PHY and report its initial state. */
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (avoid_D3)
		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out_pci_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}
1093
/*
 * Allocate one DMA-coherent area holding both descriptor rings
 * (Rx ring first, Tx ring directly behind it), plus the per-slot Tx
 * bounce buffers that Rhine-I chips need.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (partial
 * allocations are freed before returning).
 *
 * NOTE(review): GFP_ATOMIC looks stricter than necessary if all
 * callers run in process context -- confirm before relaxing.
 */
static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void *ring;
	dma_addr_t ring_dma;

	ring = dma_alloc_coherent(hwdev,
				  RX_RING_SIZE * sizeof(struct rx_desc) +
				  TX_RING_SIZE * sizeof(struct tx_desc),
				  &ring_dma,
				  GFP_ATOMIC);
	if (!ring) {
		netdev_err(dev, "Could not allocate DMA memory\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = dma_alloc_coherent(hwdev,
						 PKT_BUF_SZ * TX_RING_SIZE,
						 &rp->tx_bufs_dma,
						 GFP_ATOMIC);
		if (rp->tx_bufs == NULL) {
			/* Undo the ring allocation above. */
			dma_free_coherent(hwdev,
					  RX_RING_SIZE * sizeof(struct rx_desc) +
					  TX_RING_SIZE * sizeof(struct tx_desc),
					  ring, ring_dma);
			return -ENOMEM;
		}
	}

	/* Rx ring occupies the front of the area, Tx ring the rest. */
	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}
1131
1132static void free_ring(struct net_device* dev)
1133{
1134 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001135 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136
Alexey Charkovf7630d12014-04-22 19:28:08 +04001137 dma_free_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001138 RX_RING_SIZE * sizeof(struct rx_desc) +
1139 TX_RING_SIZE * sizeof(struct tx_desc),
1140 rp->rx_ring, rp->rx_ring_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141 rp->tx_ring = NULL;
1142
1143 if (rp->tx_bufs)
Alexey Charkovf7630d12014-04-22 19:28:08 +04001144 dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001145 rp->tx_bufs, rp->tx_bufs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
1147 rp->tx_bufs = NULL;
1148
1149}
1150
/*
 * Initialize the Rx descriptor ring and fill it with freshly mapped
 * skbs.  Allocation or mapping failures are tolerated: the loop stops
 * early and dirty_rx records how many slots still lack a buffer.
 */
static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	/* Buffer must hold an MTU-sized frame; +32 is presumably slack
	 * for header/CRC -- confirm against PKT_BUF_SZ definition. */
	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;

		rp->rx_skbuff_dma[i] =
			dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
			rp->rx_skbuff_dma[i] = 0;
			dev_kfree_skb(skb);
			break;
		}
		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		/* Hand the descriptor to the chip. */
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	/* Number of slots the refill path still has to populate. */
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}
1195
1196static void free_rbufs(struct net_device* dev)
1197{
1198 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001199 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 int i;
1201
1202 /* Free all the skbuffs in the Rx queue. */
1203 for (i = 0; i < RX_RING_SIZE; i++) {
1204 rp->rx_ring[i].rx_status = 0;
1205 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1206 if (rp->rx_skbuff[i]) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001207 dma_unmap_single(hwdev,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 rp->rx_skbuff_dma[i],
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001209 rp->rx_buf_sz, DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 dev_kfree_skb(rp->rx_skbuff[i]);
1211 }
1212 rp->rx_skbuff[i] = NULL;
1213 }
1214}
1215
1216static void alloc_tbufs(struct net_device* dev)
1217{
1218 struct rhine_private *rp = netdev_priv(dev);
1219 dma_addr_t next;
1220 int i;
1221
1222 rp->dirty_tx = rp->cur_tx = 0;
1223 next = rp->tx_ring_dma;
1224 for (i = 0; i < TX_RING_SIZE; i++) {
1225 rp->tx_skbuff[i] = NULL;
1226 rp->tx_ring[i].tx_status = 0;
1227 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1228 next += sizeof(struct tx_desc);
1229 rp->tx_ring[i].next_desc = cpu_to_le32(next);
Roger Luethi4be5de22006-04-04 20:49:16 +02001230 if (rp->quirks & rqRhineI)
1231 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 }
1233 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1234
1235}
1236
/*
 * Reset all Tx descriptors and release any skbs still queued.
 * Slots with a zero DMA handle are not unmapped -- presumably those
 * packets were copied into the Rhine-I bounce buffer instead of being
 * mapped (NOTE: inferred from the check; confirm in rhine_start_tx).
 */
static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				dma_unmap_single(hwdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 DMA_TO_DEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}
1260
/*
 * Re-evaluate link and duplex through the MII library and mirror the
 * resolved duplex mode into the chip (Cmd1FDuplex in ChipCmd1).
 * @init_media is forwarded to mii_check_media (treats this call as the
 * first check after open).
 */
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);

	/* Read-modify-write of the duplex bit only. */
	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   rp->mii_if.force_media, netif_carrier_ok(dev));
}
1278
/*
 * Called after status of force_media possibly changed.  With autoneg
 * disabled the link is assumed up unconditionally; otherwise the MII
 * library re-resolves carrier state via rhine_check_media().
 */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	struct net_device *dev = mii->dev;
	struct rhine_private *rp = netdev_priv(dev);

	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else	/* Let MII library update carrier status */
		rhine_check_media(dev, 0);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   mii->force_media, netif_carrier_ok(dev));
}
1295
/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 */
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	int i;

	/* Enable CAM access before programming it. */
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (MCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	for (i = 0; i < 6; i++, addr++)
		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
	/* Delays presumably give the CAM time to latch -- no datasheet
	 * reference in-tree to confirm. */
	udelay(10);
	wmb();

	/* Commit the entry, then disable CAM access again. */
	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}
1326
/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	/* Select the VLAN CAM bank (CAMC_VCAMSL) and enable access. */
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (VCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
	udelay(10);
	wmb();

	/* Commit the entry, then disable CAM access again. */
	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}
1354
/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask (bit n enables CAM entry n)
 *
 * Mask sets multicast filters active/inactive.
 */
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}
1373
/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask (bit n enables VLAN CAM entry n)
 *
 * Mask sets VLAN filters active/inactive.
 */
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
{
	/* CAMC_VCAMSL selects the VLAN CAM bank. */
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}
1392
/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this
 * Rhine.
 */
static void rhine_init_cam_filter(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Disable all CAMs */
	rhine_set_vlan_cam_mask(ioaddr, 0);
	rhine_set_cam_mask(ioaddr, 0);

	/* disable hardware VLAN support */
	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}
1413
/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: rhine network device
 *
 * Update VLAN CAM filters to match configuration change.  Programs at
 * most VCAM_SIZE VLAN IDs from rp->active_vlans and enables exactly
 * those CAM slots.  Callers hold rp->lock (see the vid add/kill paths).
 */
static void rhine_update_vcam(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 vid;
	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
	unsigned int i = 0;

	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
		vCAMmask |= 1 << i;
		if (++i >= VCAM_SIZE)
			break;
	}
	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}
1436
/*
 * ndo_vlan_rx_add_vid: record @vid as active and reprogram the VLAN
 * CAM filters.  rp->lock serializes active_vlans and CAM access.
 */
static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_bh(&rp->lock);
	set_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
	return 0;
}
1447
/*
 * ndo_vlan_rx_kill_vid: drop @vid from the active set and reprogram
 * the VLAN CAM filters.  rp->lock serializes active_vlans and CAM access.
 */
static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_bh(&rp->lock);
	clear_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
	return 0;
}
1458
/*
 * Program the chip from scratch: station address, FIFO thresholds,
 * descriptor ring base addresses, Rx mode, (VT6105M+) CAM filters --
 * then enable NAPI and interrupts and start the Tx/Rx engines.
 */
static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Only VT6105M and newer have the CAM filter hardware. */
	if (rp->revision >= VT6105M)
		rhine_init_cam_filter(dev);

	/* NAPI must be ready before interrupts are unmasked. */
	napi_enable(&rp->napi);

	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}
1491
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	/* Stop any MII command in progress, select BMSR, start polling. */
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	/* Wait for bit 0x20 in MIIRegAddr -- presumably "MDIO ready";
	 * datasheet needed to confirm the exact meaning. */
	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
1505
/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);

	/* Rhine-I needs an extra, carefully ordered dance to actually
	 * stop the poll engine. */
	if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
}
1530
/* Read and write over the MII Management Data I/O (MDIO) interface. */

/*
 * Read PHY register @regnum of PHY @phy_id.  Link auto-polling is
 * paused around the access because it shares the MII engine.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	/* Wait for the read command bit to self-clear. */
	rhine_wait_bit_low(rp, MIICmd, 0x40);
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(rp);
	return result;
}
1551
/*
 * Write @value to PHY register @regnum of PHY @phy_id.
 *
 * As with mdio_read(), auto-polling is suspended around the access.
 */
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	/* Wait for the chip to clear the write-trigger bit (access done). */
	rhine_wait_bit_low(rp, MIICmd, 0x20);

	rhine_enable_linkmon(rp);
}
1568
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001569static void rhine_task_disable(struct rhine_private *rp)
1570{
1571 mutex_lock(&rp->task_lock);
1572 rp->task_enable = false;
1573 mutex_unlock(&rp->task_lock);
1574
1575 cancel_work_sync(&rp->slow_event_task);
1576 cancel_work_sync(&rp->reset_task);
1577}
1578
/* Re-allow the deferred-work handlers (counterpart of rhine_task_disable). */
static void rhine_task_enable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = true;
	mutex_unlock(&rp->task_lock);
}
1585
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586static int rhine_open(struct net_device *dev)
1587{
1588 struct rhine_private *rp = netdev_priv(dev);
1589 void __iomem *ioaddr = rp->base;
1590 int rc;
1591
Alexey Charkovf7630d12014-04-22 19:28:08 +04001592 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 if (rc)
1594 return rc;
1595
Alexey Charkovf7630d12014-04-22 19:28:08 +04001596 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597
1598 rc = alloc_ring(dev);
1599 if (rc) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001600 free_irq(rp->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 return rc;
1602 }
1603 alloc_rbufs(dev);
1604 alloc_tbufs(dev);
1605 rhine_chip_reset(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001606 rhine_task_enable(rp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 init_registers(dev);
Francois Romieufc3e0f82012-01-07 22:39:37 +01001608
1609 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1610 __func__, ioread16(ioaddr + ChipCmd),
1611 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612
1613 netif_start_queue(dev);
1614
1615 return 0;
1616}
1617
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001618static void rhine_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619{
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001620 struct rhine_private *rp = container_of(work, struct rhine_private,
1621 reset_task);
1622 struct net_device *dev = rp->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001624 mutex_lock(&rp->task_lock);
1625
1626 if (!rp->task_enable)
1627 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001629 napi_disable(&rp->napi);
Richard Weinbergera9265922014-01-14 22:46:36 +01001630 netif_tx_disable(dev);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001631 spin_lock_bh(&rp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632
1633 /* clear all descriptors */
1634 free_tbufs(dev);
1635 free_rbufs(dev);
1636 alloc_tbufs(dev);
1637 alloc_rbufs(dev);
1638
1639 /* Reinitialize the hardware. */
1640 rhine_chip_reset(dev);
1641 init_registers(dev);
1642
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001643 spin_unlock_bh(&rp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644
Eric Dumazet1ae5dc32010-05-10 05:01:31 -07001645 dev->trans_start = jiffies; /* prevent tx timeout */
Eric Dumazet553e2332009-05-27 10:34:50 +00001646 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 netif_wake_queue(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001648
1649out_unlock:
1650 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651}
1652
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001653static void rhine_tx_timeout(struct net_device *dev)
1654{
1655 struct rhine_private *rp = netdev_priv(dev);
1656 void __iomem *ioaddr = rp->base;
1657
Joe Perchesdf4511f2011-04-16 14:15:25 +00001658 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1659 ioread16(ioaddr + IntrStatus),
1660 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001661
1662 schedule_work(&rp->reset_task);
1663}
1664
Stephen Hemminger613573252009-08-31 19:50:58 +00001665static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1666 struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667{
1668 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001669 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 void __iomem *ioaddr = rp->base;
1671 unsigned entry;
1672
1673 /* Caution: the write order is important here, set the field
1674 with the "ownership" bits last. */
1675
1676 /* Calculate the next Tx descriptor entry. */
1677 entry = rp->cur_tx % TX_RING_SIZE;
1678
Herbert Xu5b057c62006-06-23 02:06:41 -07001679 if (skb_padto(skb, ETH_ZLEN))
Patrick McHardy6ed10652009-06-23 06:03:08 +00001680 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681
1682 rp->tx_skbuff[entry] = skb;
1683
1684 if ((rp->quirks & rqRhineI) &&
Patrick McHardy84fa7932006-08-29 16:44:56 -07001685 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 /* Must use alignment buffer. */
1687 if (skb->len > PKT_BUF_SZ) {
1688 /* packet too long, drop it */
Eric W. Biederman4b3afc62014-03-15 18:22:47 -07001689 dev_kfree_skb_any(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 rp->tx_skbuff[entry] = NULL;
Eric Dumazet553e2332009-05-27 10:34:50 +00001691 dev->stats.tx_dropped++;
Patrick McHardy6ed10652009-06-23 06:03:08 +00001692 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 }
Craig Brind3e0d1672006-04-27 02:30:46 -07001694
1695 /* Padding is not copied and so must be redone. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
Craig Brind3e0d1672006-04-27 02:30:46 -07001697 if (skb->len < ETH_ZLEN)
1698 memset(rp->tx_buf[entry] + skb->len, 0,
1699 ETH_ZLEN - skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 rp->tx_skbuff_dma[entry] = 0;
1701 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1702 (rp->tx_buf[entry] -
1703 rp->tx_bufs));
1704 } else {
1705 rp->tx_skbuff_dma[entry] =
Alexey Charkovf7630d12014-04-22 19:28:08 +04001706 dma_map_single(hwdev, skb->data, skb->len,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001707 DMA_TO_DEVICE);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001708 if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
Eric W. Biederman4b3afc62014-03-15 18:22:47 -07001709 dev_kfree_skb_any(skb);
Neil Horman9b4fe5f2013-07-12 13:35:33 -04001710 rp->tx_skbuff_dma[entry] = 0;
1711 dev->stats.tx_dropped++;
1712 return NETDEV_TX_OK;
1713 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1715 }
1716
1717 rp->tx_ring[entry].desc_length =
1718 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1719
Roger Luethi38f49e82010-12-06 00:59:40 +00001720 if (unlikely(vlan_tx_tag_present(skb))) {
Roger Luethi207070f2013-09-21 14:24:11 +02001721 u16 vid_pcp = vlan_tx_tag_get(skb);
1722
1723 /* drop CFI/DEI bit, register needs VID and PCP */
1724 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1725 ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1726 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
Roger Luethi38f49e82010-12-06 00:59:40 +00001727 /* request tagging */
1728 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1729 }
1730 else
1731 rp->tx_ring[entry].tx_status = 0;
1732
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 /* lock eth irq */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 wmb();
Roger Luethi38f49e82010-12-06 00:59:40 +00001735 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 wmb();
1737
1738 rp->cur_tx++;
1739
1740 /* Non-x86 Todo: explicitly flush cache lines here. */
1741
Roger Luethi38f49e82010-12-06 00:59:40 +00001742 if (vlan_tx_tag_present(skb))
1743 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1744 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1745
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 /* Wake the potentially-idle transmit channel */
1747 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1748 ioaddr + ChipCmd1);
1749 IOSYNC;
1750
1751 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1752 netif_stop_queue(dev);
1753
Francois Romieufc3e0f82012-01-07 22:39:37 +01001754 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1755 rp->cur_tx - 1, entry);
1756
Patrick McHardy6ed10652009-06-23 06:03:08 +00001757 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758}
1759
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001760static void rhine_irq_disable(struct rhine_private *rp)
1761{
1762 iowrite16(0x0000, rp->base + IntrEnable);
1763 mmiowb();
1764}
1765
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766/* The interrupt handler does all of the Rx thread work and cleans up
1767 after the Tx thread. */
David Howells7d12e782006-10-05 14:55:46 +01001768static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769{
1770 struct net_device *dev = dev_instance;
1771 struct rhine_private *rp = netdev_priv(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001772 u32 status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 int handled = 0;
1774
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001775 status = rhine_get_events(rp);
1776
Francois Romieufc3e0f82012-01-07 22:39:37 +01001777 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001778
1779 if (status & RHINE_EVENT) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 handled = 1;
1781
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001782 rhine_irq_disable(rp);
1783 napi_schedule(&rp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 }
1785
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001786 if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
Francois Romieufc3e0f82012-01-07 22:39:37 +01001787 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1788 status);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001789 }
1790
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 return IRQ_RETVAL(handled);
1792}
1793
1794/* This routine is logically part of the interrupt handler, but isolated
1795 for clarity. */
1796static void rhine_tx(struct net_device *dev)
1797{
1798 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001799 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1801
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 /* find and cleanup dirty tx descriptors */
1803 while (rp->dirty_tx != rp->cur_tx) {
1804 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
Francois Romieufc3e0f82012-01-07 22:39:37 +01001805 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1806 entry, txstatus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 if (txstatus & DescOwn)
1808 break;
1809 if (txstatus & 0x8000) {
Francois Romieufc3e0f82012-01-07 22:39:37 +01001810 netif_dbg(rp, tx_done, dev,
1811 "Transmit error, Tx status %08x\n", txstatus);
Eric Dumazet553e2332009-05-27 10:34:50 +00001812 dev->stats.tx_errors++;
1813 if (txstatus & 0x0400)
1814 dev->stats.tx_carrier_errors++;
1815 if (txstatus & 0x0200)
1816 dev->stats.tx_window_errors++;
1817 if (txstatus & 0x0100)
1818 dev->stats.tx_aborted_errors++;
1819 if (txstatus & 0x0080)
1820 dev->stats.tx_heartbeat_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1822 (txstatus & 0x0800) || (txstatus & 0x1000)) {
Eric Dumazet553e2332009-05-27 10:34:50 +00001823 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1825 break; /* Keep the skb - we try again */
1826 }
1827 /* Transmitter restarted in 'abnormal' handler. */
1828 } else {
1829 if (rp->quirks & rqRhineI)
Eric Dumazet553e2332009-05-27 10:34:50 +00001830 dev->stats.collisions += (txstatus >> 3) & 0x0F;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 else
Eric Dumazet553e2332009-05-27 10:34:50 +00001832 dev->stats.collisions += txstatus & 0x0F;
Francois Romieufc3e0f82012-01-07 22:39:37 +01001833 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1834 (txstatus >> 3) & 0xF, txstatus & 0xF);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00001835
1836 u64_stats_update_begin(&rp->tx_stats.syncp);
1837 rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1838 rp->tx_stats.packets++;
1839 u64_stats_update_end(&rp->tx_stats.syncp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 }
1841 /* Free the original skb. */
1842 if (rp->tx_skbuff_dma[entry]) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001843 dma_unmap_single(hwdev,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 rp->tx_skbuff_dma[entry],
1845 rp->tx_skbuff[entry]->len,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001846 DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 }
Eric W. Biederman4b3afc62014-03-15 18:22:47 -07001848 dev_consume_skb_any(rp->tx_skbuff[entry]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 rp->tx_skbuff[entry] = NULL;
1850 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1851 }
1852 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1853 netif_wake_queue(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854}
1855
Roger Luethi38f49e82010-12-06 00:59:40 +00001856/**
1857 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1858 * @skb: pointer to sk_buff
1859 * @data_size: used data area of the buffer including CRC
1860 *
1861 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1862 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1863 * aligned following the CRC.
1864 */
1865static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1866{
1867 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
Harvey Harrison4562b2f2011-03-28 17:08:59 +00001868 return be16_to_cpup((__be16 *)trailer);
Roger Luethi38f49e82010-12-06 00:59:40 +00001869}
1870
Roger Luethi633949a2006-08-14 23:00:17 -07001871/* Process up to limit frames from receive ring */
1872static int rhine_rx(struct net_device *dev, int limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873{
1874 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001875 struct device *hwdev = dev->dev.parent;
Roger Luethi633949a2006-08-14 23:00:17 -07001876 int count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 int entry = rp->cur_rx % RX_RING_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878
Francois Romieufc3e0f82012-01-07 22:39:37 +01001879 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1880 entry, le32_to_cpu(rp->rx_head_desc->rx_status));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
1882 /* If EOP is set on the next entry, it's a new packet. Send it up. */
Roger Luethi633949a2006-08-14 23:00:17 -07001883 for (count = 0; count < limit; ++count) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 struct rx_desc *desc = rp->rx_head_desc;
1885 u32 desc_status = le32_to_cpu(desc->rx_status);
Roger Luethi38f49e82010-12-06 00:59:40 +00001886 u32 desc_length = le32_to_cpu(desc->desc_length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 int data_size = desc_status >> 16;
1888
Roger Luethi633949a2006-08-14 23:00:17 -07001889 if (desc_status & DescOwn)
1890 break;
1891
Francois Romieufc3e0f82012-01-07 22:39:37 +01001892 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1893 desc_status);
Roger Luethi633949a2006-08-14 23:00:17 -07001894
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1896 if ((desc_status & RxWholePkt) != RxWholePkt) {
Joe Perchesdf4511f2011-04-16 14:15:25 +00001897 netdev_warn(dev,
1898 "Oversized Ethernet frame spanned multiple buffers, "
1899 "entry %#x length %d status %08x!\n",
1900 entry, data_size,
1901 desc_status);
1902 netdev_warn(dev,
1903 "Oversized Ethernet frame %p vs %p\n",
1904 rp->rx_head_desc,
1905 &rp->rx_ring[entry]);
Eric Dumazet553e2332009-05-27 10:34:50 +00001906 dev->stats.rx_length_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 } else if (desc_status & RxErr) {
1908 /* There was a error. */
Francois Romieufc3e0f82012-01-07 22:39:37 +01001909 netif_dbg(rp, rx_err, dev,
1910 "%s() Rx error %08x\n", __func__,
1911 desc_status);
Eric Dumazet553e2332009-05-27 10:34:50 +00001912 dev->stats.rx_errors++;
1913 if (desc_status & 0x0030)
1914 dev->stats.rx_length_errors++;
1915 if (desc_status & 0x0048)
1916 dev->stats.rx_fifo_errors++;
1917 if (desc_status & 0x0004)
1918 dev->stats.rx_frame_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 if (desc_status & 0x0002) {
1920 /* this can also be updated outside the interrupt handler */
1921 spin_lock(&rp->lock);
Eric Dumazet553e2332009-05-27 10:34:50 +00001922 dev->stats.rx_crc_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 spin_unlock(&rp->lock);
1924 }
1925 }
1926 } else {
Eric Dumazet89d71a62009-10-13 05:34:20 +00001927 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 /* Length should omit the CRC */
1929 int pkt_len = data_size - 4;
Roger Luethi38f49e82010-12-06 00:59:40 +00001930 u16 vlan_tci = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
1932 /* Check if the packet is long enough to accept without
1933 copying to a minimally-sized skbuff. */
Eric Dumazet89d71a62009-10-13 05:34:20 +00001934 if (pkt_len < rx_copybreak)
1935 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1936 if (skb) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001937 dma_sync_single_for_cpu(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001938 rp->rx_skbuff_dma[entry],
1939 rp->rx_buf_sz,
1940 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941
David S. Miller8c7b7fa2007-07-10 22:08:12 -07001942 skb_copy_to_linear_data(skb,
David S. Miller689be432005-06-28 15:25:31 -07001943 rp->rx_skbuff[entry]->data,
David S. Miller8c7b7fa2007-07-10 22:08:12 -07001944 pkt_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 skb_put(skb, pkt_len);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001946 dma_sync_single_for_device(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001947 rp->rx_skbuff_dma[entry],
1948 rp->rx_buf_sz,
1949 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 } else {
1951 skb = rp->rx_skbuff[entry];
1952 if (skb == NULL) {
Joe Perchesdf4511f2011-04-16 14:15:25 +00001953 netdev_err(dev, "Inconsistent Rx descriptor chain\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 break;
1955 }
1956 rp->rx_skbuff[entry] = NULL;
1957 skb_put(skb, pkt_len);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001958 dma_unmap_single(hwdev,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 rp->rx_skbuff_dma[entry],
1960 rp->rx_buf_sz,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001961 DMA_FROM_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 }
Roger Luethi38f49e82010-12-06 00:59:40 +00001963
1964 if (unlikely(desc_length & DescTag))
1965 vlan_tci = rhine_get_vlan_tci(skb, data_size);
1966
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 skb->protocol = eth_type_trans(skb, dev);
Roger Luethi38f49e82010-12-06 00:59:40 +00001968
1969 if (unlikely(desc_length & DescTag))
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001970 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
Roger Luethi633949a2006-08-14 23:00:17 -07001971 netif_receive_skb(skb);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00001972
1973 u64_stats_update_begin(&rp->rx_stats.syncp);
1974 rp->rx_stats.bytes += pkt_len;
1975 rp->rx_stats.packets++;
1976 u64_stats_update_end(&rp->rx_stats.syncp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 }
1978 entry = (++rp->cur_rx) % RX_RING_SIZE;
1979 rp->rx_head_desc = &rp->rx_ring[entry];
1980 }
1981
1982 /* Refill the Rx ring buffers. */
1983 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1984 struct sk_buff *skb;
1985 entry = rp->dirty_rx % RX_RING_SIZE;
1986 if (rp->rx_skbuff[entry] == NULL) {
Kevin Lob26b5552008-08-27 11:35:09 +08001987 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 rp->rx_skbuff[entry] = skb;
1989 if (skb == NULL)
1990 break; /* Better luck next round. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 rp->rx_skbuff_dma[entry] =
Alexey Charkovf7630d12014-04-22 19:28:08 +04001992 dma_map_single(hwdev, skb->data,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 rp->rx_buf_sz,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001994 DMA_FROM_DEVICE);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001995 if (dma_mapping_error(hwdev,
1996 rp->rx_skbuff_dma[entry])) {
Neil Horman9b4fe5f2013-07-12 13:35:33 -04001997 dev_kfree_skb(skb);
1998 rp->rx_skbuff_dma[entry] = 0;
1999 break;
2000 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
2002 }
2003 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
2004 }
Roger Luethi633949a2006-08-14 23:00:17 -07002005
2006 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007}
2008
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009static void rhine_restart_tx(struct net_device *dev) {
2010 struct rhine_private *rp = netdev_priv(dev);
2011 void __iomem *ioaddr = rp->base;
2012 int entry = rp->dirty_tx % TX_RING_SIZE;
2013 u32 intr_status;
2014
2015 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002016 * If new errors occurred, we need to sort them out before doing Tx.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017 * In that case the ISR will be back here RSN anyway.
2018 */
Francois Romieua20a28b2011-12-30 14:53:58 +01002019 intr_status = rhine_get_events(rp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020
2021 if ((intr_status & IntrTxErrSummary) == 0) {
2022
2023 /* We know better than the chip where it should continue. */
2024 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2025 ioaddr + TxRingPtr);
2026
2027 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2028 ioaddr + ChipCmd);
Roger Luethi38f49e82010-12-06 00:59:40 +00002029
2030 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2031 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2032 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2033
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2035 ioaddr + ChipCmd1);
2036 IOSYNC;
2037 }
2038 else {
2039 /* This should never happen */
Francois Romieufc3e0f82012-01-07 22:39:37 +01002040 netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2041 intr_status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 }
2043
2044}
2045
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002046static void rhine_slow_event_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047{
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002048 struct rhine_private *rp =
2049 container_of(work, struct rhine_private, slow_event_task);
2050 struct net_device *dev = rp->dev;
2051 u32 intr_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002053 mutex_lock(&rp->task_lock);
2054
2055 if (!rp->task_enable)
2056 goto out_unlock;
2057
2058 intr_status = rhine_get_events(rp);
2059 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060
2061 if (intr_status & IntrLinkChange)
John W. Linville38bb6b22006-05-19 10:51:21 -04002062 rhine_check_media(dev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063
Francois Romieufc3e0f82012-01-07 22:39:37 +01002064 if (intr_status & IntrPCIErr)
2065 netif_warn(rp, hw, dev, "PCI error\n");
2066
David S. Miller559bcac2013-01-29 22:58:04 -05002067 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002069out_unlock:
2070 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071}
2072
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002073static struct rtnl_link_stats64 *
2074rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075{
2076 struct rhine_private *rp = netdev_priv(dev);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002077 unsigned int start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002079 spin_lock_bh(&rp->lock);
2080 rhine_update_rx_crc_and_missed_errord(rp);
2081 spin_unlock_bh(&rp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002083 netdev_stats_to_stats64(stats, &dev->stats);
2084
2085 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07002086 start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002087 stats->rx_packets = rp->rx_stats.packets;
2088 stats->rx_bytes = rp->rx_stats.bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -07002089 } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002090
2091 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07002092 start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002093 stats->tx_packets = rp->tx_stats.packets;
2094 stats->tx_bytes = rp->tx_stats.bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -07002095 } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +00002096
2097 return stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098}
2099
/*
 * ndo_set_rx_mode: program the Rx filter.
 *
 * Promiscuous mode and oversized multicast lists open the hash filters
 * completely; chips >= VT6105M get exact multicast matching through the
 * CAM, older chips a 64-bit hash filter.  VT6105M and later also gate
 * VLAN receive filtering on promiscuous mode.
 */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->revision >= VT6105M) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		/* Older chips: 64-bit multicast hash filter. */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->revision >= VT6105M) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
2147
2148static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2149{
Alexey Charkovf7630d12014-04-22 19:28:08 +04002150 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
Rick Jones23020ab2011-11-09 09:58:07 +00002152 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2153 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
Alexey Charkovf7630d12014-04-22 19:28:08 +04002154 strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155}
2156
/*
 * ethtool get_settings: query link parameters via the generic MII layer.
 * task_lock serializes MII access against the deferred tasks.
 */
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);

	return rc;
}
2168
/*
 * ethtool set_settings: apply link parameters via the generic MII layer
 * and refresh the carrier state.  task_lock serializes MII access.
 */
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}
2181
2182static int netdev_nway_reset(struct net_device *dev)
2183{
2184 struct rhine_private *rp = netdev_priv(dev);
2185
2186 return mii_nway_restart(&rp->mii_if);
2187}
2188
2189static u32 netdev_get_link(struct net_device *dev)
2190{
2191 struct rhine_private *rp = netdev_priv(dev);
2192
2193 return mii_link_ok(&rp->mii_if);
2194}
2195
2196static u32 netdev_get_msglevel(struct net_device *dev)
2197{
Francois Romieufc3e0f82012-01-07 22:39:37 +01002198 struct rhine_private *rp = netdev_priv(dev);
2199
2200 return rp->msg_enable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201}
2202
2203static void netdev_set_msglevel(struct net_device *dev, u32 value)
2204{
Francois Romieufc3e0f82012-01-07 22:39:37 +01002205 struct rhine_private *rp = netdev_priv(dev);
2206
2207 rp->msg_enable = value;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208}
2209
/*
 * ethtool: report Wake-on-LAN capabilities and the currently armed
 * wake-up events.  On chips without the rqWOL quirk, @wol is left
 * untouched.
 */
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	/* rp->lock guards wolopts against concurrent rhine_set_wol(). */
	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}
2223
2224static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2225{
2226 struct rhine_private *rp = netdev_priv(dev);
2227 u32 support = WAKE_PHY | WAKE_MAGIC |
2228 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2229
2230 if (!(rp->quirks & rqWOL))
2231 return -EINVAL;
2232
2233 if (wol->wolopts & ~support)
2234 return -EINVAL;
2235
2236 spin_lock_irq(&rp->lock);
2237 rp->wolopts = wol->wolopts;
2238 spin_unlock_irq(&rp->lock);
2239
2240 return 0;
2241}
2242
/* ethtool operations (legacy get/set_settings link-settings API). */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};
2254
2255static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2256{
2257 struct rhine_private *rp = netdev_priv(dev);
2258 int rc;
2259
2260 if (!netif_running(dev))
2261 return -EINVAL;
2262
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002263 mutex_lock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
Roger Luethi00b428c2006-03-28 20:53:56 +02002265 rhine_set_carrier(&rp->mii_if);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002266 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
2268 return rc;
2269}
2270
/*
 * Bring the interface down.  Order matters: quiesce the software paths
 * first (slow-work task, NAPI, Tx queue), then the hardware (loopback,
 * IRQ mask, stop Tx/Rx), and only then release the IRQ and free the
 * DMA rings and buffers.
 */
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	/* Hardware is quiet now; safe to drop the IRQ and DMA resources. */
	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
2298
2299
/*
 * PCI removal callback: unregister the netdev first so no new I/O can
 * start, then unmap the registers, release the BARs, free the device
 * and disable the PCI function.
 */
static void rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}
2313
/*
 * Arm Wake-on-LAN at shutdown time on WOL-capable chips (rqWOL quirk).
 * Programs the chip's wake-up event registers from rp->wolopts and,
 * when the system is powering off and avoid_D3 is not set, enables PME
 * and drops the device into D3hot.
 */
static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	/* Entering D3 here breaks PXE on some BIOSes; see avoid_D3. */
	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
2364
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: quiesce the slow-work task, interrupts and NAPI,
 * detach the netdev, then (PCI devices only) reuse rhine_shutdown()
 * to arm Wake-on-LAN and power the chip down.
 */
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	/* Non-PCI (platform) variants skip the PCI WOL/D3 sequence. */
	if (dev_is_pci(device))
		rhine_shutdown(to_pci_dev(device));

	return 0;
}
2385
/*
 * System resume: re-enable MMIO where configured, redo power init,
 * rebuild the Rx/Tx rings from scratch (stale buffers are discarded)
 * and reprogram the chip registers before reattaching the netdev.
 */
static int rhine_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	rhine_task_enable(rp);
	/* init_registers() is called with rp->lock held elsewhere too. */
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}
Francois Romieue92b9b32012-01-07 22:58:27 +01002411
/* Suspend/resume hooks; compiled out when PM sleep support is off. */
static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS	(&rhine_pm_ops)

#else

#define RHINE_PM_OPS	NULL

#endif /* !CONFIG_PM_SLEEP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420
/* PCI driver description: probe/remove/shutdown and PM callbacks. */
static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= rhine_remove_one,
	.shutdown	= rhine_shutdown,
	.driver.pm	= RHINE_PM_OPS,
};
2429
Sachin Kamat77273ea2013-08-07 16:08:16 +05302430static struct dmi_system_id rhine_dmi_table[] __initdata = {
Roger Luethie84df482007-03-06 19:57:37 +01002431 {
2432 .ident = "EPIA-M",
2433 .matches = {
2434 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2435 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2436 },
2437 },
2438 {
2439 .ident = "KV7",
2440 .matches = {
2441 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2442 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2443 },
2444 },
2445 { NULL }
2446};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447
2448static int __init rhine_init(void)
2449{
2450/* when a module, this is printed whether or not devices are found in probe */
2451#ifdef MODULE
Joe Perchesdf4511f2011-04-16 14:15:25 +00002452 pr_info("%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453#endif
Roger Luethie84df482007-03-06 19:57:37 +01002454 if (dmi_check_system(rhine_dmi_table)) {
2455 /* these BIOSes fail at PXE boot if chip is in D3 */
Rusty Russelleb939922011-12-19 14:08:01 +00002456 avoid_D3 = true;
Joe Perchesdf4511f2011-04-16 14:15:25 +00002457 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
Roger Luethie84df482007-03-06 19:57:37 +01002458 }
2459 else if (avoid_D3)
Joe Perchesdf4511f2011-04-16 14:15:25 +00002460 pr_info("avoid_D3 set\n");
Roger Luethie84df482007-03-06 19:57:37 +01002461
Jeff Garzik29917622006-08-19 17:48:59 -04002462 return pci_register_driver(&rhine_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463}
2464
2465
/* Module exit: unregister the PCI driver (runs remove on each device). */
static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);