blob: 725106f75d425a1e4342c59ef46e993a493d292d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
6
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
13
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
17
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
21 Annapolis MD 21403
22
23
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
Jeff Garzik03a8c662006-06-27 07:57:22 -040028 [link no longer provides useful info -jgarzik]
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30*/
31
/* Prefix all pr_*() messages with the module name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.5.1"
#define DRV_RELDATE	"2010-10-09"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 0;		/* netif_msg bitmap, see RHINE_MSG_DEFAULT */
#define RHINE_MSG_DEFAULT \
        (0x0000)
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
/* Architectures that cannot tolerate unaligned IP headers: always copy. */
static int rx_copybreak = 1518;
#else
static int rx_copybreak;	/* 0: never copy received frames */
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;
Roger Luethib933b4d2006-08-14 23:00:21 -070059
/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * With BQL support, we can increase TX ring safely.
 * There are no ill effects from too-large receive rings.
 */
#define TX_RING_SIZE	64
#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
89
90#include <linux/module.h>
91#include <linux/moduleparam.h>
92#include <linux/kernel.h>
93#include <linux/string.h>
94#include <linux/timer.h>
95#include <linux/errno.h>
96#include <linux/ioport.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070097#include <linux/interrupt.h>
98#include <linux/pci.h>
Alexey Charkov2d283862014-04-22 19:28:09 +040099#include <linux/of_address.h>
100#include <linux/of_device.h>
101#include <linux/of_irq.h>
102#include <linux/platform_device.h>
Domen Puncer1e7f0bd2005-06-26 18:22:14 -0400103#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104#include <linux/netdevice.h>
105#include <linux/etherdevice.h>
106#include <linux/skbuff.h>
107#include <linux/init.h>
108#include <linux/delay.h>
109#include <linux/mii.h>
110#include <linux/ethtool.h>
111#include <linux/crc32.h>
Roger Luethi38f49e82010-12-06 00:59:40 +0000112#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113#include <linux/bitops.h>
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800114#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115#include <asm/processor.h> /* Processor type for cache alignment. */
116#include <asm/io.h>
117#include <asm/irq.h>
118#include <asm/uaccess.h>
Roger Luethie84df482007-03-06 19:57:37 +0100119#include <linux/dmi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120
/* These identify the driver base version and may not be removed. */
static const char version[] =
	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

/* Module parameters; see MODULE_PARM_DESC strings below for semantics. */
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

/* Sizes of the on-chip multicast and VLAN CAM filter tables. */
#define MCAM_SIZE	32
#define VCAM_SIZE	32
138
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139/*
140 Theory of Operation
141
142I. Board Compatibility
143
144This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
145controller.
146
147II. Board-specific settings
148
149Boards with this chip are functional only in a bus-master PCI slot.
150
151Many operational settings are loaded from the EEPROM to the Config word at
152offset 0x78. For most of these settings, this driver assumes that they are
153correct.
154If this driver is compiled to use PCI memory space operations the EEPROM
155must be configured to enable memory ops.
156
157III. Driver operation
158
159IIIa. Ring buffers
160
161This driver uses two statically allocated fixed-size descriptor lists
162formed into rings by a branch from the final descriptor to the beginning of
163the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
164
165IIIb/c. Transmit/Receive Structure
166
167This driver attempts to use a zero-copy receive and transmit scheme.
168
169Alas, all data buffers are required to start on a 32 bit boundary, so
170the driver must often copy transmit packets into bounce buffers.
171
172The driver allocates full frame size skbuffs for the Rx ring buffers at
173open() time and passes the skb->data field to the chip as receive data
174buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
175a fresh skbuff is allocated and the frame is copied to the new skbuff.
176When the incoming frame is larger, the skbuff is passed directly up the
177protocol stack. Buffers consumed this way are replaced by newly allocated
178skbuffs in the last phase of rhine_rx().
179
180The RX_COPYBREAK value is chosen to trade-off the memory wasted by
181using a full-sized skbuff for small frames vs. the copying costs of larger
182frames. New boards are typically used in generously configured machines
183and the underfilled buffers have negligible impact compared to the benefit of
184a single allocation size, so the default value of zero results in never
185copying packets. When copying is done, the cost is usually mitigated by using
186a combined copy/checksum routine. Copying also preloads the cache, which is
187most useful with small frames.
188
189Since the VIA chips are only able to transfer data to buffers on 32 bit
190boundaries, the IP header at offset 14 in an ethernet frame isn't
191longword aligned for further processing. Copying these unaligned buffers
192has the beneficial effect of 16-byte aligning the IP header.
193
194IIId. Synchronization
195
196The driver runs as two independent, single-threaded flows of control. One
197is the send-packet routine, which enforces single-threaded use by the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800198netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
199which is single threaded by the hardware and interrupt handling software.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200
201The send packet thread has partial control over the Tx ring. It locks the
Wang Chenb74ca3a2008-12-08 01:14:16 -0800202netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
203the ring is not available it stops the transmit queue by
204calling netif_stop_queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206The interrupt handler has exclusive control over the Rx ring and records stats
207from the Tx ring. After reaping the stats, it marks the Tx queue entry as
208empty by incrementing the dirty_tx mark. If at least half of the entries in
209the Rx ring are available the transmit queue is woken up if it was stopped.
210
211IV. Notes
212
213IVb. References
214
215Preliminary VT86C100A manual from http://www.via.com.tw/
216http://www.scyld.com/expert/100mbps.html
217http://www.scyld.com/expert/NWay.html
218ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
219ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
220
221
222IVc. Errata
223
224The VT86C100A manual is not reliable information.
225The 3043 chip does not handle unaligned transmit or receive buffers, resulting
226in significant performance degradation for bounce buffer copies on transmit
227and unaligned IP headers on receive.
228The chip does not pad to minimum transmit length.
229
230*/
231
232
233/* This table drives the PCI probe routines. It's mostly boilerplate in all
234 of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
236 second only the 1234 card.
237*/
238
/* Chip revision codes as read from the PCI revision ID. */
enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};
255
/* Per-chip behavioral quirks, stored in rhine_private.quirks. */
enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
					 * switched from PIO mode to MMIO
					 * (only applies to PCI)
					 */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
277
/* PCI device IDs handled by this driver (all vendor 0x1106, VIA). */
static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
286
/* OpenFirmware identifiers for platform-bus devices
 * The .data field is currently only used to store quirks
 */
static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
static const struct of_device_id rhine_of_tbl[] = {
	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(of, rhine_of_tbl);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296
/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};
314
/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register.
 * Note 0x40/0x80 are overloaded: their meaning differs per chip. */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};
354
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0	/* zero-terminated list */
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};
392
/* The Rx and Tx buffer descriptors.  Fields are little-endian as seen
 * by the chip (__le32). */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length; /* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
431
/* 64-bit packet/byte counters with a sync point for consistent reads. */
struct rhine_stats {
	u64		packets;
	u64		bytes;
	struct u64_stats_sync syncp;
};

/* Per-device driver state, stored in netdev_priv(dev). */
struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int irq;
	long pioaddr;		/* PIO base (used before/alongside MMIO) */
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;		/* serializes the two work items below */
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;		/* netif_msg bitmap for log filtering */

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;		/* enum rhine_quirks bits for this chip */
	unsigned int cur_rx;
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;	/* mapped register base (MMIO) */
};
487
/* Read-modify-write helpers for chip registers: set bits (ON), test
 * bits (IS_ON), clear bits (OFF), or replace a masked field (SET). */
#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)  do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)  do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
503
504
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505static int mdio_read(struct net_device *dev, int phy_id, int location);
506static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
507static int rhine_open(struct net_device *dev);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800508static void rhine_reset_task(struct work_struct *work);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100509static void rhine_slow_event_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510static void rhine_tx_timeout(struct net_device *dev);
Stephen Hemminger613573252009-08-31 19:50:58 +0000511static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
512 struct net_device *dev);
David Howells7d12e782006-10-05 14:55:46 +0100513static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700514static void rhine_tx(struct net_device *dev);
Roger Luethi633949a2006-08-14 23:00:17 -0700515static int rhine_rx(struct net_device *dev, int limit);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516static void rhine_set_rx_mode(struct net_device *dev);
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000517static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
518 struct rtnl_link_stats64 *stats);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Jeff Garzik7282d492006-09-13 14:30:00 -0400520static const struct ethtool_ops netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521static int rhine_close(struct net_device *dev);
Patrick McHardy80d5c362013-04-19 02:04:28 +0000522static int rhine_vlan_rx_add_vid(struct net_device *dev,
523 __be16 proto, u16 vid);
524static int rhine_vlan_rx_kill_vid(struct net_device *dev,
525 __be16 proto, u16 vid);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100526static void rhine_restart_tx(struct net_device *dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000528static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
Francois Romieua384a332012-01-07 22:19:36 +0100529{
530 void __iomem *ioaddr = rp->base;
531 int i;
532
533 for (i = 0; i < 1024; i++) {
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000534 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
535
536 if (low ^ has_mask_bits)
Francois Romieua384a332012-01-07 22:19:36 +0100537 break;
538 udelay(10);
539 }
540 if (i > 64) {
Francois Romieufc3e0f82012-01-07 22:39:37 +0100541 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
Andreas Mohr3f8c91a2012-04-01 12:35:00 +0000542 "count: %04d\n", low ? "low" : "high", reg, mask, i);
Francois Romieua384a332012-01-07 22:19:36 +0100543 }
544}
545
/* Wait for the bits in @mask to become set in register @reg. */
static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}

/* Wait for the bits in @mask to become clear in register @reg. */
static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555
Francois Romieua20a28b2011-12-30 14:53:58 +0100556static u32 rhine_get_events(struct rhine_private *rp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558 void __iomem *ioaddr = rp->base;
559 u32 intr_status;
560
561 intr_status = ioread16(ioaddr + IntrStatus);
562 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
563 if (rp->quirks & rqStatusWBRace)
564 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
565 return intr_status;
566}
567
/*
 * Acknowledge (clear) the interrupt events in @mask.  Bits 16+ map to
 * IntrStatus2 on chips with the write-back race quirk; the write order
 * (IntrStatus2 before IntrStatus) is preserved deliberately.
 */
static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
	mmiowb();	/* order MMIO writes before releasing any lock */
}
577
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578/*
579 * Get power related registers into sane state.
580 * Notify user about past WOL event.
581 */
582static void rhine_power_init(struct net_device *dev)
583{
584 struct rhine_private *rp = netdev_priv(dev);
585 void __iomem *ioaddr = rp->base;
586 u16 wolstat;
587
588 if (rp->quirks & rqWOL) {
589 /* Make sure chip is in power state D0 */
590 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
591
592 /* Disable "force PME-enable" */
593 iowrite8(0x80, ioaddr + WOLcgClr);
594
595 /* Clear power-event config bits (WOL) */
596 iowrite8(0xFF, ioaddr + WOLcrClr);
597 /* More recent cards can manage two additional patterns */
598 if (rp->quirks & rq6patterns)
599 iowrite8(0x03, ioaddr + WOLcrClr1);
600
601 /* Save power-event status bits */
602 wolstat = ioread8(ioaddr + PwrcsrSet);
603 if (rp->quirks & rq6patterns)
604 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
605
606 /* Clear power-event status bits */
607 iowrite8(0xFF, ioaddr + PwrcsrClr);
608 if (rp->quirks & rq6patterns)
609 iowrite8(0x03, ioaddr + PwrcsrClr1);
610
611 if (wolstat) {
612 char *reason;
613 switch (wolstat) {
614 case WOLmagic:
615 reason = "Magic packet";
616 break;
617 case WOLlnkon:
618 reason = "Link went up";
619 break;
620 case WOLlnkoff:
621 reason = "Link went down";
622 break;
623 case WOLucast:
624 reason = "Unicast packet";
625 break;
626 case WOLbmcast:
627 reason = "Multicast/broadcast packet";
628 break;
629 default:
630 reason = "Unknown";
631 }
Joe Perchesdf4511f2011-04-16 14:15:25 +0000632 netdev_info(dev, "Woke system up. Reason: %s\n",
633 reason);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700634 }
635 }
636}
637
/*
 * Soft-reset the chip via ChipCmd1.  If the reset bit has not cleared
 * immediately, optionally force the reset (rqForceReset chips) and then
 * wait for the bit to clear.  Logs the final outcome.
 */
static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u8 cmd1;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;		/* flush the posted write before reading back */

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
	}

	cmd1 = ioread8(ioaddr + ChipCmd1);
	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
		   "failed" : "succeeded");
}
662
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663static void enable_mmio(long pioaddr, u32 quirks)
664{
665 int n;
Alexey Charkov5b579e22014-05-03 16:40:53 +0400666
667 if (quirks & rqNeedEnMMIO) {
668 if (quirks & rqRhineI) {
669 /* More recent docs say that this bit is reserved */
670 n = inb(pioaddr + ConfigA) | 0x20;
671 outb(n, pioaddr + ConfigA);
672 } else {
673 n = inb(pioaddr + ConfigD) | 0x80;
674 outb(n, pioaddr + ConfigD);
675 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700676 }
677}
Alexey Charkov5b579e22014-05-03 16:40:53 +0400678
679static inline int verify_mmio(struct device *hwdev,
680 long pioaddr,
681 void __iomem *ioaddr,
682 u32 quirks)
683{
684 if (quirks & rqNeedEnMMIO) {
685 int i = 0;
686
687 /* Check that selected MMIO registers match the PIO ones */
688 while (mmio_verify_registers[i]) {
689 int reg = mmio_verify_registers[i++];
690 unsigned char a = inb(pioaddr+reg);
691 unsigned char b = readb(ioaddr+reg);
692
693 if (a != b) {
694 dev_err(hwdev,
695 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
696 reg, a, b);
697 return -EIO;
698 }
699 }
700 }
701 return 0;
702}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703
/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	/* Trigger the reload; the chip clears the bit when it is done. */
	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);

	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}
734
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt': run the interrupt handler with the device IRQ
 * disabled (used by netconsole and similar facilities).
 */
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
746
Francois Romieu269f3112011-12-30 14:43:54 +0100747static void rhine_kick_tx_threshold(struct rhine_private *rp)
748{
749 if (rp->tx_thresh < 0xe0) {
750 void __iomem *ioaddr = rp->base;
751
752 rp->tx_thresh += 0x20;
753 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
754 }
755}
756
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100757static void rhine_tx_err(struct rhine_private *rp, u32 status)
758{
759 struct net_device *dev = rp->dev;
760
761 if (status & IntrTxAborted) {
Francois Romieufc3e0f82012-01-07 22:39:37 +0100762 netif_info(rp, tx_err, dev,
763 "Abort %08x, frame dropped\n", status);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100764 }
765
766 if (status & IntrTxUnderrun) {
767 rhine_kick_tx_threshold(rp);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100768 netif_info(rp, tx_err ,dev, "Transmitter underrun, "
769 "Tx threshold now %02x\n", rp->tx_thresh);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100770 }
771
Francois Romieufc3e0f82012-01-07 22:39:37 +0100772 if (status & IntrTxDescRace)
773 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100774
775 if ((status & IntrTxError) &&
776 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
777 rhine_kick_tx_threshold(rp);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100778 netif_info(rp, tx_err, dev, "Unspecified error. "
779 "Tx threshold now %02x\n", rp->tx_thresh);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100780 }
781
782 rhine_restart_tx(dev);
783}
784
/* Fold the chip's CRC-error and missed-frame tally counters into the
 * netdev stats, then clear the hardware counters. Caller must hold
 * rp->lock (see the IntrStatsMax handling in rhine_napipoll()).
 */
static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors	+= ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors	+= ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}
803
/* Rx interrupt sources handled from NAPI poll context. */
#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow | \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

/* Tx error sources that trigger rhine_tx_err() from NAPI context. */
#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

/* Everything serviced by rhine_napipoll(), including stats rollover. */
#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
/* Rare events deferred to the slow_event_task workqueue. */
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
823
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700824static int rhine_napipoll(struct napi_struct *napi, int budget)
Roger Luethi633949a2006-08-14 23:00:17 -0700825{
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700826 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
827 struct net_device *dev = rp->dev;
Roger Luethi633949a2006-08-14 23:00:17 -0700828 void __iomem *ioaddr = rp->base;
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100829 u16 enable_mask = RHINE_EVENT & 0xffff;
830 int work_done = 0;
831 u32 status;
Roger Luethi633949a2006-08-14 23:00:17 -0700832
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100833 status = rhine_get_events(rp);
834 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
835
836 if (status & RHINE_EVENT_NAPI_RX)
837 work_done += rhine_rx(dev, budget);
838
839 if (status & RHINE_EVENT_NAPI_TX) {
840 if (status & RHINE_EVENT_NAPI_TX_ERR) {
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100841 /* Avoid scavenging before Tx engine turned off */
Francois Romieua384a332012-01-07 22:19:36 +0100842 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
Francois Romieufc3e0f82012-01-07 22:39:37 +0100843 if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
844 netif_warn(rp, tx_err, dev, "Tx still on\n");
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100845 }
Francois Romieufc3e0f82012-01-07 22:39:37 +0100846
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100847 rhine_tx(dev);
848
849 if (status & RHINE_EVENT_NAPI_TX_ERR)
850 rhine_tx_err(rp, status);
851 }
852
853 if (status & IntrStatsMax) {
854 spin_lock(&rp->lock);
855 rhine_update_rx_crc_and_missed_errord(rp);
856 spin_unlock(&rp->lock);
857 }
858
859 if (status & RHINE_EVENT_SLOW) {
860 enable_mask &= ~RHINE_EVENT_SLOW;
861 schedule_work(&rp->slow_event_task);
862 }
Roger Luethi633949a2006-08-14 23:00:17 -0700863
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700864 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -0800865 napi_complete(napi);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100866 iowrite16(enable_mask, ioaddr + IntrEnable);
867 mmiowb();
Roger Luethi633949a2006-08-14 23:00:17 -0700868 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700869 return work_done;
Roger Luethi633949a2006-08-14 23:00:17 -0700870}
Roger Luethi633949a2006-08-14 23:00:17 -0700871
Bill Pemberton76e239e2012-12-03 09:23:48 -0500872static void rhine_hw_init(struct net_device *dev, long pioaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700873{
874 struct rhine_private *rp = netdev_priv(dev);
875
876 /* Reset the chip to erase previous misconfiguration. */
877 rhine_chip_reset(dev);
878
879 /* Rhine-I needs extra time to recuperate before EEPROM reload */
880 if (rp->quirks & rqRhineI)
881 msleep(5);
882
883 /* Reload EEPROM controlled bytes cleared by soft reset */
Alexey Charkov2d283862014-04-22 19:28:09 +0400884 if (dev_is_pci(dev->dev.parent))
885 rhine_reload_eeprom(pioaddr, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700886}
887
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800888static const struct net_device_ops rhine_netdev_ops = {
889 .ndo_open = rhine_open,
890 .ndo_stop = rhine_close,
891 .ndo_start_xmit = rhine_start_tx,
Jamie Gloudonf7b5d1b2013-01-23 18:05:04 +0000892 .ndo_get_stats64 = rhine_get_stats64,
Jiri Pirkoafc4b132011-08-16 06:29:01 +0000893 .ndo_set_rx_mode = rhine_set_rx_mode,
Ben Hutchings635ecaa2009-07-09 17:59:01 +0000894 .ndo_change_mtu = eth_change_mtu,
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800895 .ndo_validate_addr = eth_validate_addr,
Stephen Hemmingerfe96aaa2009-01-09 11:13:14 +0000896 .ndo_set_mac_address = eth_mac_addr,
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800897 .ndo_do_ioctl = netdev_ioctl,
898 .ndo_tx_timeout = rhine_tx_timeout,
Roger Luethi38f49e82010-12-06 00:59:40 +0000899 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
900 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800901#ifdef CONFIG_NET_POLL_CONTROLLER
902 .ndo_poll_controller = rhine_poll,
903#endif
904};
905
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400906static int rhine_init_one_common(struct device *hwdev, u32 quirks,
Alexey Charkov2d283862014-04-22 19:28:09 +0400907 long pioaddr, void __iomem *ioaddr, int irq)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908{
909 struct net_device *dev;
910 struct rhine_private *rp;
Alexey Charkov2d283862014-04-22 19:28:09 +0400911 int i, rc, phy_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912 const char *name;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913
914 /* this should always be supported */
Alexey Charkovf7630d12014-04-22 19:28:08 +0400915 rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700916 if (rc) {
Alexey Charkovf7630d12014-04-22 19:28:08 +0400917 dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
Alexey Charkov2d283862014-04-22 19:28:09 +0400918 goto err_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919 }
920
Linus Torvalds1da177e2005-04-16 15:20:36 -0700921 dev = alloc_etherdev(sizeof(struct rhine_private));
922 if (!dev) {
923 rc = -ENOMEM;
Alexey Charkov2d283862014-04-22 19:28:09 +0400924 goto err_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700925 }
Alexey Charkovf7630d12014-04-22 19:28:08 +0400926 SET_NETDEV_DEV(dev, hwdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927
928 rp = netdev_priv(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700929 rp->dev = dev;
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400930 rp->quirks = quirks;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700931 rp->pioaddr = pioaddr;
Alexey Charkov2d283862014-04-22 19:28:09 +0400932 rp->base = ioaddr;
933 rp->irq = irq;
Francois Romieufc3e0f82012-01-07 22:39:37 +0100934 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700935
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400936 phy_id = rp->quirks & rqIntPHY ? 1 : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937
John Stultz827da442013-10-07 15:51:58 -0700938 u64_stats_init(&rp->tx_stats.syncp);
939 u64_stats_init(&rp->rx_stats.syncp);
940
Linus Torvalds1da177e2005-04-16 15:20:36 -0700941 /* Get chip registers into a sane state */
942 rhine_power_init(dev);
943 rhine_hw_init(dev, pioaddr);
944
945 for (i = 0; i < 6; i++)
946 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
947
Joe Perches482e3fe2011-04-16 14:15:26 +0000948 if (!is_valid_ether_addr(dev->dev_addr)) {
949 /* Report it and use a random ethernet address instead */
950 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
Danny Kukawkaf2cedb62012-02-15 06:45:39 +0000951 eth_hw_addr_random(dev);
Joe Perches482e3fe2011-04-16 14:15:26 +0000952 netdev_info(dev, "Using random MAC address: %pM\n",
953 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954 }
955
956 /* For Rhine-I/II, phy_id is loaded from EEPROM */
957 if (!phy_id)
958 phy_id = ioread8(ioaddr + 0x6C);
959
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960 spin_lock_init(&rp->lock);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100961 mutex_init(&rp->task_lock);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800962 INIT_WORK(&rp->reset_task, rhine_reset_task);
Francois Romieu7ab87ff2012-01-06 21:42:26 +0100963 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
Jarek Poplawskic0d7a022009-12-23 21:54:29 -0800964
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965 rp->mii_if.dev = dev;
966 rp->mii_if.mdio_read = mdio_read;
967 rp->mii_if.mdio_write = mdio_write;
968 rp->mii_if.phy_id_mask = 0x1f;
969 rp->mii_if.reg_num_mask = 0x1f;
970
971 /* The chip-specific entries in the device structure. */
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800972 dev->netdev_ops = &rhine_netdev_ops;
wangweidonge76070f2014-03-17 15:52:17 +0800973 dev->ethtool_ops = &netdev_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 dev->watchdog_timeo = TX_TIMEOUT;
Stephen Hemminger5d1d07d2008-11-21 17:30:11 -0800975
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700976 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
Francois Romieu32b0f532008-07-11 00:30:14 +0200977
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 if (rp->quirks & rqRhineI)
979 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
980
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400981 if (rp->quirks & rqMgmt)
Patrick McHardyf6469682013-04-19 02:04:27 +0000982 dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
983 NETIF_F_HW_VLAN_CTAG_RX |
984 NETIF_F_HW_VLAN_CTAG_FILTER;
Roger Luethi38f49e82010-12-06 00:59:40 +0000985
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 /* dev->name not defined before register_netdev()! */
987 rc = register_netdev(dev);
988 if (rc)
Alexey Charkov2d283862014-04-22 19:28:09 +0400989 goto err_out_free_netdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990
Alexey Charkovca8b6e02014-04-30 22:21:09 +0400991 if (rp->quirks & rqRhineI)
992 name = "Rhine";
993 else if (rp->quirks & rqStatusWBRace)
994 name = "Rhine II";
995 else if (rp->quirks & rqMgmt)
996 name = "Rhine III (Management Adapter)";
997 else
998 name = "Rhine III";
999
Joe Perchesdf4511f2011-04-16 14:15:25 +00001000 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
Alexey Charkov2d283862014-04-22 19:28:09 +04001001 name, (long)ioaddr, dev->dev_addr, rp->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001002
Alexey Charkovf7630d12014-04-22 19:28:08 +04001003 dev_set_drvdata(hwdev, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004
1005 {
1006 u16 mii_cmd;
1007 int mii_status = mdio_read(dev, phy_id, 1);
1008 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1009 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1010 if (mii_status != 0xffff && mii_status != 0x0000) {
1011 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
Joe Perchesdf4511f2011-04-16 14:15:25 +00001012 netdev_info(dev,
1013 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1014 phy_id,
1015 mii_status, rp->mii_if.advertising,
1016 mdio_read(dev, phy_id, 5));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017
1018 /* set IFF_RUNNING */
1019 if (mii_status & BMSR_LSTATUS)
1020 netif_carrier_on(dev);
1021 else
1022 netif_carrier_off(dev);
1023
1024 }
1025 }
1026 rp->mii_if.phy_id = phy_id;
Francois Romieufc3e0f82012-01-07 22:39:37 +01001027 if (avoid_D3)
1028 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029
1030 return 0;
1031
Alexey Charkov2d283862014-04-22 19:28:09 +04001032err_out_free_netdev:
1033 free_netdev(dev);
1034err_out:
1035 return rc;
1036}
1037
/* PCI probe: derive the quirks mask from the PCI revision, claim the
 * I/O and memory BARs, map the register block (PIO or MMIO depending on
 * CONFIG_VIA_RHINE_MMIO), verify MMIO against PIO, and hand off to
 * rhine_init_one_common(). Errors unwind via the goto ladder below.
 */
static int rhine_init_one_pci(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *hwdev = &pdev->dev;
	int rc;
	long pioaddr, memaddr;
	void __iomem *ioaddr;
	/* Rhine-I exposes only 128 bytes of registers; later chips 256 */
	int io_size = pdev->revision < VTunknown0 ? 128 : 256;

/* This driver was written to use PCI memory space. Some early versions
 * of the Rhine may only work correctly with I/O space accesses.
 * TODO: determine for which revisions this is true and assign the flag
 * in code as opposed to this Kconfig option (???)
 */
#ifdef CONFIG_VIA_RHINE_MMIO
	u32 quirks = rqNeedEnMMIO;
#else
	u32 quirks = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* Map the PCI revision onto chip generation quirk flags */
	if (pdev->revision < VTunknown0) {
		quirks |= rqRhineI;
	} else if (pdev->revision >= VT6102) {
		quirks |= rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			quirks |= rqStatusWBRace;
		} else {
			quirks |= rqIntPHY;
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision >= VT6105M)
				quirks |= rqMgmt;
		}
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
		goto err_out_pci_disable;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_pci_disable;

	/* BAR 1 is the memory BAR, BAR 0 the I/O BAR */
	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(hwdev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			dev_name(hwdev), io_size, memaddr);
		goto err_out_free_res;
	}

	enable_mmio(pioaddr, quirks);

	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
	if (rc)
		goto err_out_unmap;

	rc = rhine_init_one_common(&pdev->dev, quirks,
				   pioaddr, ioaddr, pdev->irq);
	if (!rc)
		return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_pci_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}
1128
Alexey Charkov2d283862014-04-22 19:28:09 +04001129static int rhine_init_one_platform(struct platform_device *pdev)
1130{
1131 const struct of_device_id *match;
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001132 const u32 *quirks;
Alexey Charkov2d283862014-04-22 19:28:09 +04001133 int irq;
1134 struct resource *res;
1135 void __iomem *ioaddr;
1136
1137 match = of_match_device(rhine_of_tbl, &pdev->dev);
1138 if (!match)
1139 return -EINVAL;
1140
1141 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1142 ioaddr = devm_ioremap_resource(&pdev->dev, res);
1143 if (IS_ERR(ioaddr))
1144 return PTR_ERR(ioaddr);
1145
1146 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1147 if (!irq)
1148 return -EINVAL;
1149
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001150 quirks = match->data;
1151 if (!quirks)
Alexey Charkov2d283862014-04-22 19:28:09 +04001152 return -EINVAL;
1153
Alexey Charkovca8b6e02014-04-30 22:21:09 +04001154 return rhine_init_one_common(&pdev->dev, *quirks,
Alexey Charkov2d283862014-04-22 19:28:09 +04001155 (long)ioaddr, ioaddr, irq);
1156}
1157
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158static int alloc_ring(struct net_device* dev)
1159{
1160 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001161 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 void *ring;
1163 dma_addr_t ring_dma;
1164
Alexey Charkovf7630d12014-04-22 19:28:08 +04001165 ring = dma_alloc_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001166 RX_RING_SIZE * sizeof(struct rx_desc) +
1167 TX_RING_SIZE * sizeof(struct tx_desc),
1168 &ring_dma,
1169 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170 if (!ring) {
Joe Perchesdf4511f2011-04-16 14:15:25 +00001171 netdev_err(dev, "Could not allocate DMA memory\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172 return -ENOMEM;
1173 }
1174 if (rp->quirks & rqRhineI) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001175 rp->tx_bufs = dma_alloc_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001176 PKT_BUF_SZ * TX_RING_SIZE,
1177 &rp->tx_bufs_dma,
1178 GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179 if (rp->tx_bufs == NULL) {
Alexey Charkovf7630d12014-04-22 19:28:08 +04001180 dma_free_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001181 RX_RING_SIZE * sizeof(struct rx_desc) +
1182 TX_RING_SIZE * sizeof(struct tx_desc),
1183 ring, ring_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 return -ENOMEM;
1185 }
1186 }
1187
1188 rp->rx_ring = ring;
1189 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1190 rp->rx_ring_dma = ring_dma;
1191 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1192
1193 return 0;
1194}
1195
1196static void free_ring(struct net_device* dev)
1197{
1198 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001199 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200
Alexey Charkovf7630d12014-04-22 19:28:08 +04001201 dma_free_coherent(hwdev,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001202 RX_RING_SIZE * sizeof(struct rx_desc) +
1203 TX_RING_SIZE * sizeof(struct tx_desc),
1204 rp->rx_ring, rp->rx_ring_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 rp->tx_ring = NULL;
1206
1207 if (rp->tx_bufs)
Alexey Charkovf7630d12014-04-22 19:28:08 +04001208 dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
Alexey Charkov4087c4d2014-04-22 19:28:07 +04001209 rp->tx_bufs, rp->tx_bufs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210
1211 rp->tx_bufs = NULL;
1212
1213}
1214
françois romieua21bb8b2015-05-01 22:14:39 +02001215struct rhine_skb_dma {
1216 struct sk_buff *skb;
1217 dma_addr_t dma;
1218};
1219
1220static inline int rhine_skb_dma_init(struct net_device *dev,
1221 struct rhine_skb_dma *sd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222{
1223 struct rhine_private *rp = netdev_priv(dev);
Alexey Charkovf7630d12014-04-22 19:28:08 +04001224 struct device *hwdev = dev->dev.parent;
françois romieua21bb8b2015-05-01 22:14:39 +02001225 const int size = rp->rx_buf_sz;
1226
1227 sd->skb = netdev_alloc_skb(dev, size);
1228 if (!sd->skb)
1229 return -ENOMEM;
1230
1231 sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1232 if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1233 netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1234 dev_kfree_skb_any(sd->skb);
1235 return -EIO;
1236 }
1237
1238 return 0;
1239}
1240
françois romieu8709bb22015-05-01 22:14:41 +02001241static void rhine_reset_rbufs(struct rhine_private *rp)
1242{
1243 int i;
1244
1245 rp->cur_rx = 0;
françois romieu8709bb22015-05-01 22:14:41 +02001246
1247 for (i = 0; i < RX_RING_SIZE; i++)
1248 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1249}
1250
françois romieua21bb8b2015-05-01 22:14:39 +02001251static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1252 struct rhine_skb_dma *sd, int entry)
1253{
1254 rp->rx_skbuff_dma[entry] = sd->dma;
1255 rp->rx_skbuff[entry] = sd->skb;
1256
1257 rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
1258 dma_wmb();
1259}
1260
françois romieu8709bb22015-05-01 22:14:41 +02001261static void free_rbufs(struct net_device* dev);
1262
/* Initialize the Rx descriptor ring and populate every slot with a
 * freshly allocated, DMA-mapped skb. On a mid-ring allocation failure,
 * all slots filled so far are released via free_rbufs().
 *
 * Returns 0 on success or the negative errno from rhine_skb_dma_init().
 */
static int alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int rc, i;

	/* Buffer size: PKT_BUF_SZ for standard MTU, else MTU plus slack */
	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct rhine_skb_dma sd;

		rc = rhine_skb_dma_init(dev, &sd);
		if (rc < 0) {
			free_rbufs(dev);
			goto out;
		}

		rhine_skb_dma_nic_store(rp, &sd, i);
	}

	/* Hand all descriptors to the NIC only after every slot is mapped */
	rhine_reset_rbufs(rp);
out:
	return rc;
}
1300
/* Unmap and free every Rx skb and poison the descriptor addresses so a
 * stray device access is recognizable.
 */
static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->rx_skbuff[i]) {
			dma_unmap_single(hwdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}
1320
/* Initialize the Tx descriptor ring: link the descriptors into a
 * circular chain, reset the producer/consumer indices, and (Rhine-I
 * only) point each slot at its pre-allocated bounce buffer.
 */
static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	/* Close the ring: last descriptor points back to the first */
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

	netdev_reset_queue(dev);
}
1342
/* Unmap and free every pending Tx skb, poison the descriptor addresses,
 * and clear the Rhine-I bounce-buffer pointers. Slots whose data went
 * through a bounce buffer have tx_skbuff_dma[i] == 0 and need no unmap.
 */
static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				dma_unmap_single(hwdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 DMA_TO_DEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}
1366
/* Re-query the MII link state (unless forced) and program the chip's
 * full/half-duplex bit in ChipCmd1 to match the negotiated mode.
 */
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!rp->mii_if.force_media)
		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);

	if (rp->mii_if.full_duplex)
	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
		   ioaddr + ChipCmd1);
	else
	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
		   ioaddr + ChipCmd1);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   rp->mii_if.force_media, netif_carrier_ok(dev));
}
1385
1386/* Called after status of force_media possibly changed */
Adrian Bunk0761be42006-04-10 23:22:21 -07001387static void rhine_set_carrier(struct mii_if_info *mii)
Roger Luethi00b428c2006-03-28 20:53:56 +02001388{
Francois Romieufc3e0f82012-01-07 22:39:37 +01001389 struct net_device *dev = mii->dev;
1390 struct rhine_private *rp = netdev_priv(dev);
1391
Roger Luethi00b428c2006-03-28 20:53:56 +02001392 if (mii->force_media) {
1393 /* autoneg is off: Link is always assumed to be up */
Francois Romieufc3e0f82012-01-07 22:39:37 +01001394 if (!netif_carrier_ok(dev))
1395 netif_carrier_on(dev);
François Cachereul17958432014-06-12 12:11:25 +02001396 }
1397
1398 rhine_check_media(dev, 0);
Francois Romieufc3e0f82012-01-07 22:39:37 +01001399
1400 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1401 mii->force_media, netif_carrier_ok(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402}
1403
Roger Luethi38f49e82010-12-06 00:59:40 +00001404/**
1405 * rhine_set_cam - set CAM multicast filters
1406 * @ioaddr: register block of this Rhine
1407 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1408 * @addr: multicast address (6 bytes)
1409 *
1410 * Load addresses into multicast filters.
1411 */
1412static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1413{
1414 int i;
1415
1416 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1417 wmb();
1418
1419 /* Paranoid -- idx out of range should never happen */
1420 idx &= (MCAM_SIZE - 1);
1421
1422 iowrite8((u8) idx, ioaddr + CamAddr);
1423
1424 for (i = 0; i < 6; i++, addr++)
1425 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1426 udelay(10);
1427 wmb();
1428
1429 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1430 udelay(10);
1431
1432 iowrite8(0, ioaddr + CamCon);
1433}
1434
1435/**
1436 * rhine_set_vlan_cam - set CAM VLAN filters
1437 * @ioaddr: register block of this Rhine
1438 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1439 * @addr: VLAN ID (2 bytes)
1440 *
1441 * Load addresses into VLAN filters.
1442 */
1443static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1444{
1445 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1446 wmb();
1447
1448 /* Paranoid -- idx out of range should never happen */
1449 idx &= (VCAM_SIZE - 1);
1450
1451 iowrite8((u8) idx, ioaddr + CamAddr);
1452
1453 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1454 udelay(10);
1455 wmb();
1456
1457 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1458 udelay(10);
1459
1460 iowrite8(0, ioaddr + CamCon);
1461}
1462
1463/**
1464 * rhine_set_cam_mask - set multicast CAM mask
1465 * @ioaddr: register block of this Rhine
1466 * @mask: multicast CAM mask
1467 *
1468 * Mask sets multicast filters active/inactive.
1469 */
1470static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1471{
1472 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1473 wmb();
1474
1475 /* write mask */
1476 iowrite32(mask, ioaddr + CamMask);
1477
1478 /* disable CAMEN */
1479 iowrite8(0, ioaddr + CamCon);
1480}
1481
1482/**
1483 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1484 * @ioaddr: register block of this Rhine
1485 * @mask: VLAN CAM mask
1486 *
1487 * Mask sets VLAN filters active/inactive.
1488 */
1489static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1490{
1491 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1492 wmb();
1493
1494 /* write mask */
1495 iowrite32(mask, ioaddr + CamMask);
1496
1497 /* disable CAMEN */
1498 iowrite8(0, ioaddr + CamCon);
1499}
1500
1501/**
1502 * rhine_init_cam_filter - initialize CAM filters
1503 * @dev: network device
1504 *
1505 * Initialize (disable) hardware VLAN and multicast support on this
1506 * Rhine.
1507 */
1508static void rhine_init_cam_filter(struct net_device *dev)
1509{
1510 struct rhine_private *rp = netdev_priv(dev);
1511 void __iomem *ioaddr = rp->base;
1512
1513 /* Disable all CAMs */
1514 rhine_set_vlan_cam_mask(ioaddr, 0);
1515 rhine_set_cam_mask(ioaddr, 0);
1516
1517 /* disable hardware VLAN support */
1518 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1519 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1520}
1521
1522/**
1523 * rhine_update_vcam - update VLAN CAM filters
1524 * @rp: rhine_private data of this Rhine
1525 *
1526 * Update VLAN CAM filters to match configuration change.
1527 */
1528static void rhine_update_vcam(struct net_device *dev)
1529{
1530 struct rhine_private *rp = netdev_priv(dev);
1531 void __iomem *ioaddr = rp->base;
1532 u16 vid;
1533 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1534 unsigned int i = 0;
1535
1536 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1537 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1538 vCAMmask |= 1 << i;
1539 if (++i >= VCAM_SIZE)
1540 break;
1541 }
1542 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1543}
1544
Patrick McHardy80d5c362013-04-19 02:04:28 +00001545static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
Roger Luethi38f49e82010-12-06 00:59:40 +00001546{
1547 struct rhine_private *rp = netdev_priv(dev);
1548
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001549 spin_lock_bh(&rp->lock);
Roger Luethi38f49e82010-12-06 00:59:40 +00001550 set_bit(vid, rp->active_vlans);
1551 rhine_update_vcam(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001552 spin_unlock_bh(&rp->lock);
Jiri Pirko8e586132011-12-08 19:52:37 -05001553 return 0;
Roger Luethi38f49e82010-12-06 00:59:40 +00001554}
1555
Patrick McHardy80d5c362013-04-19 02:04:28 +00001556static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
Roger Luethi38f49e82010-12-06 00:59:40 +00001557{
1558 struct rhine_private *rp = netdev_priv(dev);
1559
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001560 spin_lock_bh(&rp->lock);
Roger Luethi38f49e82010-12-06 00:59:40 +00001561 clear_bit(vid, rp->active_vlans);
1562 rhine_update_vcam(dev);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01001563 spin_unlock_bh(&rp->lock);
Jiri Pirko8e586132011-12-08 19:52:37 -05001564 return 0;
Roger Luethi38f49e82010-12-06 00:59:40 +00001565}
1566
/*
 * Program the chip for operation: station address, FIFO thresholds,
 * descriptor ring base addresses, rx mode, interrupt mask, and finally
 * start the Tx/Rx engines. Called with the device quiesced (open or
 * after a reset).
 */
static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	/* station (MAC) address, one byte per register */
	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	/* tell the chip where the descriptor rings live */
	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* CAM-based VLAN/multicast filtering only on management chips */
	if (rp->quirks & rqMgmt)
		rhine_init_cam_filter(dev);

	/* NAPI must be live before interrupts are unmasked below */
	napi_enable(&rp->napi);

	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);

	/* start both DMA engines; Tx polling is driven on demand */
	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
	       ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}
1599
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	/* stop any MII command, select BMSR as the polled register,
	 * then start auto-polling (0x80 in MIICmd) */
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	/* wait until the chip flags the poll as active (bit 0x20) */
	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

	/* 0x40 presumably re-arms continuous polling — see
	 * rhine_disable_linkmon() for the counterpart */
	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
1613
/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);

	/* Rhine-I needs a quirky stop sequence; later chips just report
	 * completion in MIIRegAddr bit 0x80. */
	if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
}
1638
1639/* Read and write over the MII Management Data I/O (MDIO) interface. */
1640
/*
 * Read one PHY register over the MII Management Data I/O interface.
 * Link auto-polling must be (and is) suspended around the access
 * because it shares the same MII command machinery.
 */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	/* command bit self-clears when the read completes */
	rhine_wait_bit_low(rp, MIICmd, 0x40);
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(rp);
	return result;
}
1659
/*
 * Write one PHY register over MDIO. Same poll-suspend dance as
 * mdio_read(); the write command bit (0x20) self-clears on completion.
 */
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	rhine_wait_bit_low(rp, MIICmd, 0x20);

	rhine_enable_linkmon(rp);
}
1676
/*
 * Forbid and flush the deferred work items. task_enable is cleared
 * under task_lock FIRST so that a task which is already running (or
 * about to run) sees the flag and bails out; only then are the works
 * cancelled/synced. Order matters.
 */
static void rhine_task_disable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = false;
	mutex_unlock(&rp->task_lock);

	cancel_work_sync(&rp->slow_event_task);
	cancel_work_sync(&rp->reset_task);
}
1686
/* Re-allow the deferred work items; counterpart of rhine_task_disable(). */
static void rhine_task_enable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = true;
	mutex_unlock(&rp->task_lock);
}
1693
/*
 * ndo_open: acquire the IRQ, allocate descriptor rings and Rx/Tx
 * buffers, reset and program the chip, then start the Tx queue.
 * Uses the classic goto-unwind pattern: resources are released in
 * reverse order of acquisition on failure.
 */
static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto out;

	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);

	rc = alloc_ring(dev);
	if (rc < 0)
		goto out_free_irq;

	rc = alloc_rbufs(dev);
	if (rc < 0)
		goto out_free_ring;

	/* alloc_tbufs() cannot fail: Tx buffers live inside the ring
	 * allocation made above */
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	rhine_task_enable(rp);
	init_registers(dev);

	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
		  __func__, ioread16(ioaddr + ChipCmd),
		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

out:
	return rc;

out_free_ring:
	free_ring(dev);
out_free_irq:
	free_irq(rp->irq, dev);
	goto out;
}
1734
/*
 * Deferred reset, scheduled from rhine_tx_timeout(). Rebuilds both
 * descriptor rings and reinitializes the chip. Runs under task_lock
 * and bails out if rhine_task_disable() has run (device going down).
 * NAPI and the Tx queue are stopped before the rings are torn down.
 */
static void rhine_reset_task(struct work_struct *work)
{
	struct rhine_private *rp = container_of(work, struct rhine_private,
						reset_task);
	struct net_device *dev = rp->dev;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	napi_disable(&rp->napi);
	netif_tx_disable(dev);
	spin_lock_bh(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	alloc_tbufs(dev);

	rhine_reset_rbufs(rp);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);	/* also re-enables NAPI and interrupts */

	spin_unlock_bh(&rp->lock);

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);

out_unlock:
	mutex_unlock(&rp->task_lock);
}
1769
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001770static void rhine_tx_timeout(struct net_device *dev)
1771{
1772 struct rhine_private *rp = netdev_priv(dev);
1773 void __iomem *ioaddr = rp->base;
1774
Joe Perchesdf4511f2011-04-16 14:15:25 +00001775 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1776 ioread16(ioaddr + IntrStatus),
1777 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
Jarek Poplawskic0d7a022009-12-23 21:54:29 -08001778
1779 schedule_work(&rp->reset_task);
1780}
1781
françois romieu3a5a8832015-05-01 22:14:45 +02001782static inline bool rhine_tx_queue_full(struct rhine_private *rp)
1783{
1784 return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
1785}
1786
/*
 * ndo_start_xmit: queue one skb on the Tx descriptor ring.
 * Rhine-I cannot DMA from unaligned/fragmented/checksum-offloaded
 * buffers, so those packets are bounced through a driver-owned
 * alignment buffer. Ownership (DescOwn) is handed to the NIC last,
 * after all other descriptor fields are visible (dma_wmb/wmb).
 */
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	/* skb_padto frees the skb itself on failure */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb_any(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		/* bounce buffer: nothing to unmap at completion time */
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			dma_map_single(hwdev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
			dev_kfree_skb_any(skb);
			rp->tx_skbuff_dma[entry] = 0;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(skb_vlan_tag_present(skb))) {
		u16 vid_pcp = skb_vlan_tag_get(skb);

		/* drop CFI/DEI bit, register needs VID and PCP */
		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	}
	else
		rp->tx_ring[entry].tx_status = 0;

	netdev_sent_queue(dev, skb->len);	/* BQL accounting */
	/* lock eth irq */
	/* descriptor body must be visible to the device before DescOwn */
	dma_wmb();
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;
	/*
	 * Nobody wants cur_tx write to rot for ages after the NIC will have
	 * seen the transmit request, especially as the transmit completion
	 * handler could miss it.
	 */
	smp_wmb();

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (skb_vlan_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
	if (rhine_tx_queue_full(rp)) {
		netif_stop_queue(dev);
		smp_rmb();
		/* Rejuvenate. */
		if (!rhine_tx_queue_full(rp))
			netif_wake_queue(dev);
	}

	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
		  rp->cur_tx - 1, entry);

	return NETDEV_TX_OK;
}
1894
/* Mask all chip interrupt sources; mmiowb() orders the MMIO write
 * before any subsequent lock release on other CPUs. */
static void rhine_irq_disable(struct rhine_private *rp)
{
	iowrite16(0x0000, rp->base + IntrEnable);
	mmiowb();
}
1900
/* Hard interrupt handler. The real Rx/Tx work is done in NAPI
   context: on any RHINE_EVENT we mask chip interrupts and schedule
   the poll routine. Remaining unexpected bits are just logged. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	u32 status;
	int handled = 0;

	status = rhine_get_events(rp);

	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);

	if (status & RHINE_EVENT) {
		handled = 1;

		/* NAPI poll will re-enable interrupts when done */
		rhine_irq_disable(rp);
		napi_schedule(&rp->napi);
	}

	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
			  status);
	}

	return IRQ_RETVAL(handled);
}
1928
/* Tx completion: reclaim finished descriptors between dirty_tx and
   cur_tx, update stats/BQL and wake the queue if it was stopped.
   Logically part of the interrupt handler (runs from NAPI poll), but
   isolated for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int dirty_tx = rp->dirty_tx;
	unsigned int cur_tx;
	struct sk_buff *skb;

	/*
	 * The race with rhine_start_tx does not matter here as long as the
	 * driver enforces a value of cur_tx that was relevant when the
	 * packet was scheduled to the network chipset.
	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
	 */
	smp_rmb();
	cur_tx = rp->cur_tx;
	/* find and cleanup dirty tx descriptors */
	while (dirty_tx != cur_tx) {
		unsigned int entry = dirty_tx % TX_RING_SIZE;
		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);

		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
			  entry, txstatus);
		if (txstatus & DescOwn)
			break;	/* chip still owns it - stop here */
		skb = rp->tx_skbuff[entry];
		if (txstatus & 0x8000) {
			/* error summary bit: decode individual causes */
			netif_dbg(rp, tx_done, dev,
				  "Transmit error, Tx status %08x\n", txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				/* underflow/abort: give it back to the chip
				 * for retransmission */
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			/* collision count field differs between chip gens */
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
				  (txstatus >> 3) & 0xF, txstatus & 0xF);

			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += skb->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
		}
		/* Free the original skb. */
		/* dma handle is 0 for Rhine-I bounce-buffer packets */
		if (rp->tx_skbuff_dma[entry]) {
			dma_unmap_single(hwdev,
					 rp->tx_skbuff_dma[entry],
					 skb->len,
					 DMA_TO_DEVICE);
		}
		bytes_compl += skb->len;
		pkts_compl++;
		dev_consume_skb_any(skb);
		rp->tx_skbuff[entry] = NULL;
		dirty_tx++;
	}

	rp->dirty_tx = dirty_tx;
	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
	smp_wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
		smp_rmb();
		/* Rejuvenate. */
		if (rhine_tx_queue_full(rp))
			netif_stop_queue(dev);
	}
}
2019
/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
 * aligned following the CRC.
 */
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
	/* round data_size up to 4, then skip the 2-byte TPID */
	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
	return be16_to_cpup((__be16 *)trailer);	/* TCI is big-endian on the wire */
}
2034
/* If the descriptor flags the frame as tagged (DescTag), pull the TCI
 * from the buffer trailer and attach it to the skb. The dma_rmb()
 * ensures the descriptor/buffer contents are read only after the
 * ownership check done by the caller (rhine_rx). */
static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
				     int data_size)
{
	dma_rmb();
	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
		u16 vlan_tci;

		vlan_tci = rhine_get_vlan_tci(skb, data_size);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
	}
}
2046
/* Process up to limit frames from receive ring (NAPI poll path).
   Small frames (< rx_copybreak) are copied into a fresh skb so the
   mapped ring buffer can be recycled in place; larger frames hand the
   ring skb up the stack and replace it with a newly mapped one.
   Returns the number of descriptors processed. */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int entry = rp->cur_rx % RX_RING_SIZE;
	int count;

	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_ring + entry;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		/* frame length lives in the upper 16 status bits */
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;	/* chip still owns this one - ring drained */

		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
			  desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, "
					    "entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was a error. */
				netif_dbg(rp, rx_err, dev,
					  "%s() Rx error %08x\n", __func__,
					  desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak) {
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
				if (unlikely(!skb))
					goto drop;

				/* hand the buffer to the CPU, copy, then
				 * give it back to the device */
				dma_sync_single_for_cpu(hwdev,
							rp->rx_skbuff_dma[entry],
							rp->rx_buf_sz,
							DMA_FROM_DEVICE);

				skb_copy_to_linear_data(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len);

				dma_sync_single_for_device(hwdev,
							   rp->rx_skbuff_dma[entry],
							   rp->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				struct rhine_skb_dma sd;

				/* map the replacement buffer FIRST so the
				 * ring slot is never left empty on failure */
				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
					goto drop;

				skb = rp->rx_skbuff[entry];

				dma_unmap_single(hwdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 DMA_FROM_DEVICE);
				rhine_skb_dma_nic_store(rp, &sd, entry);
			}

			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, dev);

			rhine_rx_vlan_tag(skb, desc, data_size);

			netif_receive_skb(skb);

			u64_stats_update_begin(&rp->rx_stats.syncp);
			rp->rx_stats.bytes += pkt_len;
			rp->rx_stats.packets++;
			u64_stats_update_end(&rp->rx_stats.syncp);
		}
give_descriptor_to_nic:
		desc->rx_status = cpu_to_le32(DescOwn);
		entry = (++rp->cur_rx) % RX_RING_SIZE;
	}

	return count;

drop:
	/* allocation failure: count the drop, recycle the descriptor */
	dev->stats.rx_dropped++;
	goto give_descriptor_to_nic;
}
2160
/* Restart the transmitter after a Tx error, pointing the chip at the
 * first unreclaimed descriptor (dirty_tx). Only done when no further
 * Tx error is pending; otherwise the ISR will bring us back here. */
static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = rhine_get_events(rp);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		/* 0x020000 = tagging requested, see rhine_start_tx() */
		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
			   intr_status);
	}

}
2197
/* Work item for the slow (non-NAPI) events: link changes, statistics
 * rollover, PCI errors. Acks the slow events, reacts to them, then
 * re-enables the full interrupt mask. Bails out under task_lock if
 * rhine_task_disable() already ran. */
static void rhine_slow_event_task(struct work_struct *work)
{
	struct rhine_private *rp =
		container_of(work, struct rhine_private, slow_event_task);
	struct net_device *dev = rp->dev;
	u32 intr_status;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	intr_status = rhine_get_events(rp);
	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);

	if (intr_status & IntrPCIErr)
		netif_warn(rp, hw, dev, "PCI error\n");

	/* restore the interrupt mask masked off by rhine_irq_disable() */
	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
	mutex_unlock(&rp->task_lock);
}
2224
/*
 * ndo_get_stats64: fold the chip's CRC/missed error counters into
 * dev->stats (under rp->lock), copy the legacy counters into *stats,
 * then read the 64-bit Rx/Tx packet/byte counters through their
 * u64_stats seqcount retry loops so a concurrent writer cannot tear
 * the values on 32-bit hosts.  Returns @stats.
 */
static struct rtnl_link_stats64 *
rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rhine_private *rp = netdev_priv(dev);
	unsigned int start;

	spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errord(rp);
	spin_unlock_bh(&rp->lock);

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));

	return stats;
}
2251
/*
 * ndo_set_rx_mode: program the Rx filters from dev->flags and the
 * multicast list.  Modes in decreasing priority: promiscuous,
 * all-multicast/too-many-addresses, per-address multicast CAM
 * (management chips, rqMgmt), 64-bit multicast hash.
 */
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->quirks & rqMgmt) {
		/* Management chips: exact-match multicast CAM entries. */
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		/* Classic 64-bit hash filter over the CRC of each address. */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->quirks & rqMgmt) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
2299
2300static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2301{
Alexey Charkovf7630d12014-04-22 19:28:08 +04002302 struct device *hwdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Rick Jones23020ab2011-11-09 09:58:07 +00002304 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2305 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
Alexey Charkovf7630d12014-04-22 19:28:08 +04002306 strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307}
2308
2309static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2310{
2311 struct rhine_private *rp = netdev_priv(dev);
2312 int rc;
2313
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002314 mutex_lock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 rc = mii_ethtool_gset(&rp->mii_if, cmd);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002316 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
2318 return rc;
2319}
2320
2321static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2322{
2323 struct rhine_private *rp = netdev_priv(dev);
2324 int rc;
2325
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002326 mutex_lock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 rc = mii_ethtool_sset(&rp->mii_if, cmd);
Roger Luethi00b428c2006-03-28 20:53:56 +02002328 rhine_set_carrier(&rp->mii_if);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002329 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330
2331 return rc;
2332}
2333
2334static int netdev_nway_reset(struct net_device *dev)
2335{
2336 struct rhine_private *rp = netdev_priv(dev);
2337
2338 return mii_nway_restart(&rp->mii_if);
2339}
2340
2341static u32 netdev_get_link(struct net_device *dev)
2342{
2343 struct rhine_private *rp = netdev_priv(dev);
2344
2345 return mii_link_ok(&rp->mii_if);
2346}
2347
2348static u32 netdev_get_msglevel(struct net_device *dev)
2349{
Francois Romieufc3e0f82012-01-07 22:39:37 +01002350 struct rhine_private *rp = netdev_priv(dev);
2351
2352 return rp->msg_enable;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353}
2354
2355static void netdev_set_msglevel(struct net_device *dev, u32 value)
2356{
Francois Romieufc3e0f82012-01-07 22:39:37 +01002357 struct rhine_private *rp = netdev_priv(dev);
2358
2359 rp->msg_enable = value;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360}
2361
2362static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2363{
2364 struct rhine_private *rp = netdev_priv(dev);
2365
2366 if (!(rp->quirks & rqWOL))
2367 return;
2368
2369 spin_lock_irq(&rp->lock);
2370 wol->supported = WAKE_PHY | WAKE_MAGIC |
2371 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2372 wol->wolopts = rp->wolopts;
2373 spin_unlock_irq(&rp->lock);
2374}
2375
2376static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2377{
2378 struct rhine_private *rp = netdev_priv(dev);
2379 u32 support = WAKE_PHY | WAKE_MAGIC |
2380 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2381
2382 if (!(rp->quirks & rqWOL))
2383 return -EINVAL;
2384
2385 if (wol->wolopts & ~support)
2386 return -EINVAL;
2387
2388 spin_lock_irq(&rp->lock);
2389 rp->wolopts = wol->wolopts;
2390 spin_unlock_irq(&rp->lock);
2391
2392 return 0;
2393}
2394
/* ethtool operations; the WOL pair is Rhine-specific, the rest are above. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};
2406
2407static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2408{
2409 struct rhine_private *rp = netdev_priv(dev);
2410 int rc;
2411
2412 if (!netif_running(dev))
2413 return -EINVAL;
2414
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002415 mutex_lock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
Roger Luethi00b428c2006-03-28 20:53:56 +02002417 rhine_set_carrier(&rp->mii_if);
Francois Romieu7ab87ff2012-01-06 21:42:26 +01002418 mutex_unlock(&rp->task_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419
2420 return rc;
2421}
2422
/*
 * ndo_stop: quiesce the device.  Ordering matters: stop the deferred
 * work, NAPI and the Tx queue first, then silence the chip (loopback,
 * IRQ disable, CmdStop) before the IRQ line and DMA rings are freed.
 */
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	/* Chip is quiet now: safe to release the IRQ and the rings. */
	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
2450
2451
/*
 * PCI teardown: unregister the netdev first so no new activity can
 * start, then release the BAR mapping and PCI resources.
 */
static void rhine_remove_one_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}
2465
/*
 * Platform (OF) teardown: unregister the netdev, then unmap the
 * register window.  Always succeeds.
 */
static int rhine_remove_one_platform(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	iounmap(rp->base);

	free_netdev(dev);

	return 0;
}
2479
/*
 * PCI shutdown hook (also called from suspend): program the requested
 * Wake-on-LAN sources into the chip and, on system power-off (unless
 * avoid_D3 is set), leave the device in D3hot with wake enabled.
 * No-op for chips without WOL support (rqWOL).
 */
static void rhine_shutdown_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
2530
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: stop deferred work, interrupts and NAPI, detach the
 * interface, then (PCI devices only) run the WOL/D3 shutdown path.
 */
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));

	return 0;
}

/*
 * System resume: re-enable MMIO access, redo power init, rebuild the
 * Tx ring and reset the Rx ring, then reprogram the chip registers
 * under rp->lock before re-attaching the interface.
 */
static int rhine_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	enable_mmio(rp->pioaddr, rp->quirks);
	rhine_power_init(dev);
	free_tbufs(dev);
	alloc_tbufs(dev);
	rhine_reset_rbufs(rp);
	rhine_task_enable(rp);
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS	(&rhine_pm_ops)

#else

#define RHINE_PM_OPS	NULL

#endif /* !CONFIG_PM_SLEEP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583
/* Binding for Rhine chips that sit on a PCI bus. */
static struct pci_driver rhine_driver_pci = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one_pci,
	.remove		= rhine_remove_one_pci,
	.shutdown	= rhine_shutdown_pci,
	.driver.pm	= RHINE_PM_OPS,
};
2592
/* Binding for integrated (device-tree described) Rhine controllers. */
static struct platform_driver rhine_driver_platform = {
	.probe		= rhine_init_one_platform,
	.remove		= rhine_remove_one_platform,
	.driver = {
		.name	= DRV_NAME,
		.of_match_table	= rhine_of_tbl,
		.pm		= RHINE_PM_OPS,
	}
};
2602
/*
 * Boards whose BIOS is known to fail PXE boot when the chip is left in
 * D3; rhine_init() forces avoid_D3 when one of these matches.
 */
static struct dmi_system_id rhine_dmi_table[] __initdata = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620
2621static int __init rhine_init(void)
2622{
Alexey Charkov2d283862014-04-22 19:28:09 +04002623 int ret_pci, ret_platform;
2624
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625/* when a module, this is printed whether or not devices are found in probe */
2626#ifdef MODULE
Joe Perchesdf4511f2011-04-16 14:15:25 +00002627 pr_info("%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628#endif
Roger Luethie84df482007-03-06 19:57:37 +01002629 if (dmi_check_system(rhine_dmi_table)) {
2630 /* these BIOSes fail at PXE boot if chip is in D3 */
Rusty Russelleb939922011-12-19 14:08:01 +00002631 avoid_D3 = true;
Joe Perchesdf4511f2011-04-16 14:15:25 +00002632 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
Roger Luethie84df482007-03-06 19:57:37 +01002633 }
2634 else if (avoid_D3)
Joe Perchesdf4511f2011-04-16 14:15:25 +00002635 pr_info("avoid_D3 set\n");
Roger Luethie84df482007-03-06 19:57:37 +01002636
Alexey Charkov2d283862014-04-22 19:28:09 +04002637 ret_pci = pci_register_driver(&rhine_driver_pci);
2638 ret_platform = platform_driver_register(&rhine_driver_platform);
2639 if ((ret_pci < 0) && (ret_platform < 0))
2640 return ret_pci;
2641
2642 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643}
2644
2645
static void __exit rhine_cleanup(void)
{
	/* Mirror of rhine_init(): unregister both bus bindings. */
	platform_driver_unregister(&rhine_driver_platform);
	pci_unregister_driver(&rhine_driver_pci);
}


module_init(rhine_init);
module_exit(rhine_cleanup);