blob: 5cc976d01189e3e4e8706f46fa965cf8f60246f8 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
Matt Carlson9e056c02012-02-13 15:20:17 +00007 * Copyright (C) 2005-2012 Broadcom Corporation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
9 * Firmware is:
Michael Chan49cabf42005-06-06 15:15:17 -070010 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018
19#include <linux/module.h>
20#include <linux/moduleparam.h>
Matt Carlson6867c842010-07-11 09:31:44 +000021#include <linux/stringify.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020027#include <linux/in.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/init.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000029#include <linux/interrupt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/ethtool.h>
Matt Carlson3110f5f52010-12-06 08:28:50 +000036#include <linux/mdio.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/mii.h>
Matt Carlson158d7ab2008-05-29 01:37:54 -070038#include <linux/phy.h>
Matt Carlsona9daf362008-05-25 23:49:44 -070039#include <linux/brcmphy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070040#include <linux/if_vlan.h>
41#include <linux/ip.h>
42#include <linux/tcp.h>
43#include <linux/workqueue.h>
Michael Chan61487482005-09-05 17:53:19 -070044#include <linux/prefetch.h>
Tobias Klauserf9a5f7d2005-10-29 15:09:26 +020045#include <linux/dma-mapping.h>
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -080046#include <linux/firmware.h>
Michael Chanaed93e02012-07-16 16:24:02 +000047#include <linux/hwmon.h>
48#include <linux/hwmon-sysfs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
50#include <net/checksum.h>
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -030051#include <net/ip.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070052
Javier Martinez Canillas27fd9de2011-03-26 16:42:31 +000053#include <linux/io.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070054#include <asm/byteorder.h>
Javier Martinez Canillas27fd9de2011-03-26 16:42:31 +000055#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
David S. Miller49b6e95f2007-03-29 01:38:42 -070057#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070058#include <asm/idprom.h>
David S. Miller49b6e95f2007-03-29 01:38:42 -070059#include <asm/prom.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060#endif
61
Matt Carlson63532392008-11-03 16:49:57 -080062#define BAR_0 0
63#define BAR_2 2
64
Linus Torvalds1da177e2005-04-16 15:20:36 -070065#include "tg3.h"
66
Joe Perches63c3a662011-04-26 08:12:10 +000067/* Functions & macros to verify TG3_FLAGS types */
68
69static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
70{
71 return test_bit(flag, bits);
72}
73
74static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
75{
76 set_bit(flag, bits);
77}
78
79static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
80{
81 clear_bit(flag, bits);
82}
83
84#define tg3_flag(tp, flag) \
85 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
86#define tg3_flag_set(tp, flag) \
87 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
88#define tg3_flag_clear(tp, flag) \
89 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
90
Linus Torvalds1da177e2005-04-16 15:20:36 -070091#define DRV_MODULE_NAME "tg3"
Matt Carlson6867c842010-07-11 09:31:44 +000092#define TG3_MAJ_NUM 3
Michael Chan0b3ba052012-11-14 14:44:29 +000093#define TG3_MIN_NUM 127
Matt Carlson6867c842010-07-11 09:31:44 +000094#define DRV_MODULE_VERSION \
95 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
Michael Chan0b3ba052012-11-14 14:44:29 +000096#define DRV_MODULE_RELDATE "November 14, 2012"
Linus Torvalds1da177e2005-04-16 15:20:36 -070097
Matt Carlsonfd6d3f02011-08-31 11:44:52 +000098#define RESET_KIND_SHUTDOWN 0
99#define RESET_KIND_INIT 1
100#define RESET_KIND_SUSPEND 2
101
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102#define TG3_DEF_RX_MODE 0
103#define TG3_DEF_TX_MODE 0
104#define TG3_DEF_MSG_ENABLE \
105 (NETIF_MSG_DRV | \
106 NETIF_MSG_PROBE | \
107 NETIF_MSG_LINK | \
108 NETIF_MSG_TIMER | \
109 NETIF_MSG_IFDOWN | \
110 NETIF_MSG_IFUP | \
111 NETIF_MSG_RX_ERR | \
112 NETIF_MSG_TX_ERR)
113
Matt Carlson520b2752011-06-13 13:39:02 +0000114#define TG3_GRC_LCLCTL_PWRSW_DELAY 100
115
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116/* length of time before we decide the hardware is borked,
117 * and dev->tx_timeout() should be called to fix the problem
118 */
Joe Perches63c3a662011-04-26 08:12:10 +0000119
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120#define TG3_TX_TIMEOUT (5 * HZ)
121
122/* hardware minimum and maximum for a single frame's data payload */
123#define TG3_MIN_MTU 60
124#define TG3_MAX_MTU(tp) \
Joe Perches63c3a662011-04-26 08:12:10 +0000125 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126
127/* These numbers seem to be hard coded in the NIC firmware somehow.
128 * You can't change the ring sizes, but you can change where you place
129 * them in the NIC onboard memory.
130 */
Matt Carlson7cb32cf2010-09-30 10:34:36 +0000131#define TG3_RX_STD_RING_SIZE(tp) \
Joe Perches63c3a662011-04-26 08:12:10 +0000132 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
Matt Carlsonde9f5232011-04-05 14:22:43 +0000133 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134#define TG3_DEF_RX_RING_PENDING 200
Matt Carlson7cb32cf2010-09-30 10:34:36 +0000135#define TG3_RX_JMB_RING_SIZE(tp) \
Joe Perches63c3a662011-04-26 08:12:10 +0000136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
Matt Carlsonde9f5232011-04-05 14:22:43 +0000137 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138#define TG3_DEF_RX_JUMBO_RING_PENDING 100
139
140/* Do not place this n-ring entries value into the tp struct itself,
141 * we really want to expose these constants to GCC so that modulo et
142 * al. operations are done with shifts and masks instead of with
143 * hw multiply/modulo instructions. Another solution would be to
144 * replace things like '% foo' with '& (foo - 1)'.
145 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146
147#define TG3_TX_RING_SIZE 512
148#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
149
Matt Carlson2c49a442010-09-30 10:34:35 +0000150#define TG3_RX_STD_RING_BYTES(tp) \
151 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
152#define TG3_RX_JMB_RING_BYTES(tp) \
153 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
154#define TG3_RX_RCB_RING_BYTES(tp) \
Matt Carlson7cb32cf2010-09-30 10:34:36 +0000155 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
157 TG3_TX_RING_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
159
Matt Carlson287be122009-08-28 13:58:46 +0000160#define TG3_DMA_BYTE_ENAB 64
161
162#define TG3_RX_STD_DMA_SZ 1536
163#define TG3_RX_JMB_DMA_SZ 9046
164
165#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
166
167#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
168#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169
Matt Carlson2c49a442010-09-30 10:34:35 +0000170#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
171 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
Matt Carlson2b2cdb62009-11-13 13:03:48 +0000172
Matt Carlson2c49a442010-09-30 10:34:35 +0000173#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
174 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
Matt Carlson2b2cdb62009-11-13 13:03:48 +0000175
Matt Carlsond2757fc2010-04-12 06:58:27 +0000176/* Due to a hardware bug, the 5701 can only DMA to memory addresses
177 * that are at least dword aligned when used in PCIX mode. The driver
178 * works around this bug by double copying the packet. This workaround
179 * is built into the normal double copy length check for efficiency.
180 *
181 * However, the double copy is only necessary on those architectures
182 * where unaligned memory accesses are inefficient. For those architectures
183 * where unaligned memory accesses incur little penalty, we can reintegrate
184 * the 5701 in the normal rx path. Doing so saves a device structure
185 * dereference by hardcoding the double copy threshold in place.
186 */
187#define TG3_RX_COPY_THRESHOLD 256
188#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
189 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
190#else
191 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
192#endif
193
Matt Carlson81389f52011-08-31 11:44:49 +0000194#if (NET_IP_ALIGN != 0)
195#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
196#else
Eric Dumazet9205fd92011-11-18 06:47:01 +0000197#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
Matt Carlson81389f52011-08-31 11:44:49 +0000198#endif
199
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200/* minimum number of free TX descriptors required to wake up TX process */
Matt Carlsonf3f3f272009-08-28 14:03:21 +0000201#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
Matt Carlson55086ad2011-12-14 11:09:59 +0000202#define TG3_TX_BD_DMA_MAX_2K 2048
Matt Carlsona4cb4282011-12-14 11:09:58 +0000203#define TG3_TX_BD_DMA_MAX_4K 4096
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204
Matt Carlsonad829262008-11-21 17:16:16 -0800205#define TG3_RAW_IP_ALIGN 2
206
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000207#define TG3_FW_UPDATE_TIMEOUT_SEC 5
Matt Carlson21f76382012-02-22 12:35:21 +0000208#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000209
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -0800210#define FIRMWARE_TG3 "tigon/tg3.bin"
211#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
212#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
213
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214static char version[] __devinitdata =
Joe Perches05dbe002010-02-17 19:44:19 +0000215 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216
217MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
218MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
219MODULE_LICENSE("GPL");
220MODULE_VERSION(DRV_MODULE_VERSION);
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -0800221MODULE_FIRMWARE(FIRMWARE_TG3);
222MODULE_FIRMWARE(FIRMWARE_TG3TSO);
223MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
224
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
226module_param(tg3_debug, int, 0);
227MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
228
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000229#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
230#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
231
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000232static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
252 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
253 TG3_DRV_DATA_FLAG_5705_10_100},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
255 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
256 TG3_DRV_DATA_FLAG_5705_10_100},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
259 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 TG3_DRV_DATA_FLAG_5705_10_100},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
Michael Chan126a3362006-09-27 16:03:07 -0700262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
266 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
Michael Chan126a3362006-09-27 16:03:07 -0700277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000280 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
281 PCI_VENDOR_ID_LENOVO,
282 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
283 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
286 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
Michael Chanb5d37722006-09-27 16:06:21 -0700294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
Matt Carlsond30cdd22007-10-07 23:28:35 -0700296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
Matt Carlson6c7af272007-10-21 16:12:02 -0700298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
Matt Carlson9936bcf2007-10-10 18:03:07 -0700299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
Matt Carlsonc88e6682008-11-03 16:49:18 -0800301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
Matt Carlson2befdce2009-08-28 12:28:45 +0000303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000305 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
306 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
307 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
308 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
309 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
310 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Matt Carlson321d32a2008-11-21 17:22:19 -0800311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
314 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Matt Carlson5e7ccf22009-08-25 10:08:42 +0000315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
Matt Carlson5001e2f2009-11-13 13:03:51 +0000316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
Michael Chan79d49692012-11-05 14:26:29 +0000317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
Matt Carlson5001e2f2009-11-13 13:03:51 +0000318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
Matt Carlsonb0f75222010-01-20 16:58:11 +0000319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
324 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
326 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Matt Carlson302b5002010-06-05 17:24:38 +0000327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
Matt Carlsonba1f3c72011-04-05 14:22:50 +0000328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
Greg KH02eca3f2012-07-12 15:39:44 +0000329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700330 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
331 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
332 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
333 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
334 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
335 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
336 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
Meelis Roos1dcb14d2011-05-25 05:43:47 +0000337 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700338 {}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339};
340
341MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
342
Andreas Mohr50da8592006-08-14 23:54:30 -0700343static const struct {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344 const char string[ETH_GSTRING_LEN];
Matt Carlson48fa55a2011-04-13 11:05:06 +0000345} ethtool_stats_keys[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346 { "rx_octets" },
347 { "rx_fragments" },
348 { "rx_ucast_packets" },
349 { "rx_mcast_packets" },
350 { "rx_bcast_packets" },
351 { "rx_fcs_errors" },
352 { "rx_align_errors" },
353 { "rx_xon_pause_rcvd" },
354 { "rx_xoff_pause_rcvd" },
355 { "rx_mac_ctrl_rcvd" },
356 { "rx_xoff_entered" },
357 { "rx_frame_too_long_errors" },
358 { "rx_jabbers" },
359 { "rx_undersize_packets" },
360 { "rx_in_length_errors" },
361 { "rx_out_length_errors" },
362 { "rx_64_or_less_octet_packets" },
363 { "rx_65_to_127_octet_packets" },
364 { "rx_128_to_255_octet_packets" },
365 { "rx_256_to_511_octet_packets" },
366 { "rx_512_to_1023_octet_packets" },
367 { "rx_1024_to_1522_octet_packets" },
368 { "rx_1523_to_2047_octet_packets" },
369 { "rx_2048_to_4095_octet_packets" },
370 { "rx_4096_to_8191_octet_packets" },
371 { "rx_8192_to_9022_octet_packets" },
372
373 { "tx_octets" },
374 { "tx_collisions" },
375
376 { "tx_xon_sent" },
377 { "tx_xoff_sent" },
378 { "tx_flow_control" },
379 { "tx_mac_errors" },
380 { "tx_single_collisions" },
381 { "tx_mult_collisions" },
382 { "tx_deferred" },
383 { "tx_excessive_collisions" },
384 { "tx_late_collisions" },
385 { "tx_collide_2times" },
386 { "tx_collide_3times" },
387 { "tx_collide_4times" },
388 { "tx_collide_5times" },
389 { "tx_collide_6times" },
390 { "tx_collide_7times" },
391 { "tx_collide_8times" },
392 { "tx_collide_9times" },
393 { "tx_collide_10times" },
394 { "tx_collide_11times" },
395 { "tx_collide_12times" },
396 { "tx_collide_13times" },
397 { "tx_collide_14times" },
398 { "tx_collide_15times" },
399 { "tx_ucast_packets" },
400 { "tx_mcast_packets" },
401 { "tx_bcast_packets" },
402 { "tx_carrier_sense_errors" },
403 { "tx_discards" },
404 { "tx_errors" },
405
406 { "dma_writeq_full" },
407 { "dma_write_prioq_full" },
408 { "rxbds_empty" },
409 { "rx_discards" },
410 { "rx_errors" },
411 { "rx_threshold_hit" },
412
413 { "dma_readq_full" },
414 { "dma_read_prioq_full" },
415 { "tx_comp_queue_full" },
416
417 { "ring_set_send_prod_index" },
418 { "ring_status_update" },
419 { "nic_irqs" },
420 { "nic_avoided_irqs" },
Matt Carlson4452d092011-05-19 12:12:51 +0000421 { "nic_tx_threshold_hit" },
422
423 { "mbuf_lwm_thresh_hit" },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424};
425
Matt Carlson48fa55a2011-04-13 11:05:06 +0000426#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +0000427#define TG3_NVRAM_TEST 0
428#define TG3_LINK_TEST 1
429#define TG3_REGISTER_TEST 2
430#define TG3_MEMORY_TEST 3
431#define TG3_MAC_LOOPB_TEST 4
432#define TG3_PHY_LOOPB_TEST 5
433#define TG3_EXT_LOOPB_TEST 6
434#define TG3_INTERRUPT_TEST 7
Matt Carlson48fa55a2011-04-13 11:05:06 +0000435
436
Andreas Mohr50da8592006-08-14 23:54:30 -0700437static const struct {
Michael Chan4cafd3f2005-05-29 14:56:34 -0700438 const char string[ETH_GSTRING_LEN];
Matt Carlson48fa55a2011-04-13 11:05:06 +0000439} ethtool_test_keys[] = {
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +0000440 [TG3_NVRAM_TEST] = { "nvram test (online) " },
441 [TG3_LINK_TEST] = { "link test (online) " },
442 [TG3_REGISTER_TEST] = { "register test (offline)" },
443 [TG3_MEMORY_TEST] = { "memory test (offline)" },
444 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
445 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
446 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
447 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
Michael Chan4cafd3f2005-05-29 14:56:34 -0700448};
449
Matt Carlson48fa55a2011-04-13 11:05:06 +0000450#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
451
452
Michael Chanb401e9e2005-12-19 16:27:04 -0800453static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
454{
455 writel(val, tp->regs + off);
456}
457
458static u32 tg3_read32(struct tg3 *tp, u32 off)
459{
Matt Carlsonde6f31e2010-04-12 06:58:30 +0000460 return readl(tp->regs + off);
Michael Chanb401e9e2005-12-19 16:27:04 -0800461}
462
Matt Carlson0d3031d2007-10-10 18:02:43 -0700463static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
464{
465 writel(val, tp->aperegs + off);
466}
467
468static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
469{
Matt Carlsonde6f31e2010-04-12 06:58:30 +0000470 return readl(tp->aperegs + off);
Matt Carlson0d3031d2007-10-10 18:02:43 -0700471}
472
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
474{
Michael Chan68929142005-08-09 20:17:14 -0700475 unsigned long flags;
476
477 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700478 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
479 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
Michael Chan68929142005-08-09 20:17:14 -0700480 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700481}
482
483static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
484{
485 writel(val, tp->regs + off);
486 readl(tp->regs + off);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487}
488
Michael Chan68929142005-08-09 20:17:14 -0700489static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
490{
491 unsigned long flags;
492 u32 val;
493
494 spin_lock_irqsave(&tp->indirect_lock, flags);
495 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
496 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
497 spin_unlock_irqrestore(&tp->indirect_lock, flags);
498 return val;
499}
500
501static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
502{
503 unsigned long flags;
504
505 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
506 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
507 TG3_64BIT_REG_LOW, val);
508 return;
509 }
Matt Carlson66711e662009-11-13 13:03:49 +0000510 if (off == TG3_RX_STD_PROD_IDX_REG) {
Michael Chan68929142005-08-09 20:17:14 -0700511 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
512 TG3_64BIT_REG_LOW, val);
513 return;
514 }
515
516 spin_lock_irqsave(&tp->indirect_lock, flags);
517 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
518 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
519 spin_unlock_irqrestore(&tp->indirect_lock, flags);
520
521 /* In indirect mode when disabling interrupts, we also need
522 * to clear the interrupt bit in the GRC local ctrl register.
523 */
524 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
525 (val == 0x1)) {
526 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
527 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
528 }
529}
530
531static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
532{
533 unsigned long flags;
534 u32 val;
535
536 spin_lock_irqsave(&tp->indirect_lock, flags);
537 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
538 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
539 spin_unlock_irqrestore(&tp->indirect_lock, flags);
540 return val;
541}
542
Michael Chanb401e9e2005-12-19 16:27:04 -0800543/* usec_wait specifies the wait time in usec when writing to certain registers
544 * where it is unsafe to read back the register without some delay.
545 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
546 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
547 */
548static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549{
Joe Perches63c3a662011-04-26 08:12:10 +0000550 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
Michael Chanb401e9e2005-12-19 16:27:04 -0800551 /* Non-posted methods */
552 tp->write32(tp, off, val);
553 else {
554 /* Posted method */
555 tg3_write32(tp, off, val);
556 if (usec_wait)
557 udelay(usec_wait);
558 tp->read32(tp, off);
559 }
560 /* Wait again after the read for the posted method to guarantee that
561 * the wait time is met.
562 */
563 if (usec_wait)
564 udelay(usec_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565}
566
Michael Chan09ee9292005-08-09 20:17:00 -0700567static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
568{
569 tp->write32_mbox(tp, off, val);
Joe Perches63c3a662011-04-26 08:12:10 +0000570 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
Michael Chan68929142005-08-09 20:17:14 -0700571 tp->read32_mbox(tp, off);
Michael Chan09ee9292005-08-09 20:17:00 -0700572}
573
Michael Chan20094932005-08-09 20:16:32 -0700574static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700575{
576 void __iomem *mbox = tp->regs + off;
577 writel(val, mbox);
Joe Perches63c3a662011-04-26 08:12:10 +0000578 if (tg3_flag(tp, TXD_MBOX_HWBUG))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579 writel(val, mbox);
Joe Perches63c3a662011-04-26 08:12:10 +0000580 if (tg3_flag(tp, MBOX_WRITE_REORDER))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700581 readl(mbox);
582}
583
Michael Chanb5d37722006-09-27 16:06:21 -0700584static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
585{
Matt Carlsonde6f31e2010-04-12 06:58:30 +0000586 return readl(tp->regs + off + GRCMBOX_BASE);
Michael Chanb5d37722006-09-27 16:06:21 -0700587}
588
589static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
590{
591 writel(val, tp->regs + off + GRCMBOX_BASE);
592}
593
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000594#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
Michael Chan09ee9292005-08-09 20:17:00 -0700595#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000596#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
597#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
598#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
Michael Chan20094932005-08-09 20:16:32 -0700599
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000600#define tw32(reg, val) tp->write32(tp, reg, val)
601#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
602#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
603#define tr32(reg) tp->read32(tp, reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604
/* Write @val to the chip's internal memory at offset @off through the
 * shared memory window (base-address + data register pair).
 *
 * On 5906, writes to the statistics block region are silently dropped.
 * The window is a shared resource, hence indirect_lock.  Depending on
 * the SRAM_USE_CONFIG flag the window is driven through PCI config
 * space or through flushed MMIO writes.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
629
/* Read the chip's internal memory at offset @off into *@val through the
 * shared memory window; counterpart of tg3_write_mem().
 *
 * On 5906, reads from the statistics block region return 0.  The window
 * base/data pair is shared, hence indirect_lock; SRAM_USE_CONFIG
 * selects PCI config space vs MMIO access.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
656
Matt Carlson0d3031d2007-10-10 18:02:43 -0700657static void tg3_ape_lock_init(struct tg3 *tp)
658{
659 int i;
Matt Carlson6f5c8f832011-07-13 09:27:31 +0000660 u32 regbase, bit;
Matt Carlsonf92d9dc12010-06-05 17:24:30 +0000661
662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
663 regbase = TG3_APE_LOCK_GRANT;
664 else
665 regbase = TG3_APE_PER_LOCK_GRANT;
Matt Carlson0d3031d2007-10-10 18:02:43 -0700666
667 /* Make sure the driver hasn't any stale locks. */
Matt Carlson78f94dc2011-11-04 09:14:58 +0000668 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
669 switch (i) {
670 case TG3_APE_LOCK_PHY0:
671 case TG3_APE_LOCK_PHY1:
672 case TG3_APE_LOCK_PHY2:
673 case TG3_APE_LOCK_PHY3:
674 bit = APE_LOCK_GRANT_DRIVER;
675 break;
676 default:
677 if (!tp->pci_fn)
678 bit = APE_LOCK_GRANT_DRIVER;
679 else
680 bit = 1 << tp->pci_fn;
681 }
682 tg3_ape_write32(tp, regbase + 4 * i, bit);
Matt Carlson6f5c8f832011-07-13 09:27:31 +0000683 }
684
Matt Carlson0d3031d2007-10-10 18:02:43 -0700685}
686
/* Acquire one of the hardware arbitration locks shared between the
 * driver and the APE (Application Processing Engine) firmware.
 *
 * Returns 0 on success (lock held), -EBUSY if the grant is not won
 * within ~1 ms, -EINVAL for an unknown lock number.  Succeeds trivially
 * when the APE is not enabled.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 does not arbitrate the GPIO lock. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Function 0 requests with the generic driver bit;
		 * other PCI functions use a per-function bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses a different request/grant register block. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
745
/* Release an APE arbitration lock previously taken with tg3_ape_lock().
 * Writing our own bit back to the grant register drops the lock.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 does not arbitrate the GPIO lock. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Must mirror the bit selection used in tg3_ape_lock(). */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
781
Matt Carlsonb65a3722012-07-16 16:24:00 +0000782static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
Matt Carlsonfd6d3f02011-08-31 11:44:52 +0000783{
Matt Carlsonfd6d3f02011-08-31 11:44:52 +0000784 u32 apedata;
785
Matt Carlsonb65a3722012-07-16 16:24:00 +0000786 while (timeout_us) {
Matt Carlsonfd6d3f02011-08-31 11:44:52 +0000787 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
Matt Carlsonb65a3722012-07-16 16:24:00 +0000788 return -EBUSY;
Matt Carlsonfd6d3f02011-08-31 11:44:52 +0000789
790 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
Matt Carlsonfd6d3f02011-08-31 11:44:52 +0000791 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
792 break;
793
Matt Carlsonb65a3722012-07-16 16:24:00 +0000794 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
795
796 udelay(10);
797 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
Matt Carlsonfd6d3f02011-08-31 11:44:52 +0000798 }
799
Matt Carlsonb65a3722012-07-16 16:24:00 +0000800 return timeout_us ? 0 : -EBUSY;
801}
802
Matt Carlsoncf8d55a2012-07-16 16:24:01 +0000803static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
804{
805 u32 i, apedata;
806
807 for (i = 0; i < timeout_us / 10; i++) {
808 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
809
810 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
811 break;
812
813 udelay(10);
814 }
815
816 return i == timeout_us / 10;
817}
818
Michael Chan86449942012-10-02 20:31:14 -0700819static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
820 u32 len)
Matt Carlsoncf8d55a2012-07-16 16:24:01 +0000821{
822 int err;
823 u32 i, bufoff, msgoff, maxlen, apedata;
824
825 if (!tg3_flag(tp, APE_HAS_NCSI))
826 return 0;
827
828 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
829 if (apedata != APE_SEG_SIG_MAGIC)
830 return -ENODEV;
831
832 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
833 if (!(apedata & APE_FW_STATUS_READY))
834 return -EAGAIN;
835
836 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
837 TG3_APE_SHMEM_BASE;
838 msgoff = bufoff + 2 * sizeof(u32);
839 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
840
841 while (len) {
842 u32 length;
843
844 /* Cap xfer sizes to scratchpad limits. */
845 length = (len > maxlen) ? maxlen : len;
846 len -= length;
847
848 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
849 if (!(apedata & APE_FW_STATUS_READY))
850 return -EAGAIN;
851
852 /* Wait for up to 1 msec for APE to service previous event. */
853 err = tg3_ape_event_lock(tp, 1000);
854 if (err)
855 return err;
856
857 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
858 APE_EVENT_STATUS_SCRTCHPD_READ |
859 APE_EVENT_STATUS_EVENT_PENDING;
860 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
861
862 tg3_ape_write32(tp, bufoff, base_off);
863 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
864
865 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
866 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
867
868 base_off += length;
869
870 if (tg3_ape_wait_for_event(tp, 30000))
871 return -EAGAIN;
872
873 for (i = 0; length; i += 4, length -= 4) {
874 u32 val = tg3_ape_read32(tp, msgoff + i);
875 memcpy(data, &val, sizeof(u32));
876 data++;
877 }
878 }
879
880 return 0;
881}
882
Matt Carlsonb65a3722012-07-16 16:24:00 +0000883static int tg3_ape_send_event(struct tg3 *tp, u32 event)
884{
885 int err;
886 u32 apedata;
887
888 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
889 if (apedata != APE_SEG_SIG_MAGIC)
890 return -EAGAIN;
891
892 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
893 if (!(apedata & APE_FW_STATUS_READY))
894 return -EAGAIN;
895
896 /* Wait for up to 1 millisecond for APE to service previous event. */
897 err = tg3_ape_event_lock(tp, 1000);
898 if (err)
899 return err;
900
901 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
902 event | APE_EVENT_STATUS_EVENT_PENDING);
903
904 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
905 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
906
907 return 0;
Matt Carlsonfd6d3f02011-08-31 11:44:52 +0000908}
909
/* Tell the APE firmware about a driver state transition (@kind is one
 * of RESET_KIND_INIT/SHUTDOWN/SUSPEND) by updating the host segment in
 * APE shared memory, then posting a state-change event.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment, bump the init counter and
		 * advertise the driver's identity/behavior.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Record whether we go down armed for Wake-on-LAN. */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
966
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967static void tg3_disable_ints(struct tg3 *tp)
968{
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000969 int i;
970
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971 tw32(TG3PCI_MISC_HOST_CTRL,
972 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000973 for (i = 0; i < tp->irq_max; i++)
974 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975}
976
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977static void tg3_enable_ints(struct tg3 *tp)
978{
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000979 int i;
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000980
Michael Chanbbe832c2005-06-24 20:20:04 -0700981 tp->irq_sync = 0;
982 wmb();
983
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 tw32(TG3PCI_MISC_HOST_CTRL,
985 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
Matt Carlsonf19af9c2009-09-01 12:47:49 +0000986
Matt Carlsonf89f38b2010-02-12 14:47:07 +0000987 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000988 for (i = 0; i < tp->irq_cnt; i++) {
989 struct tg3_napi *tnapi = &tp->napi[i];
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000990
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000991 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
Joe Perches63c3a662011-04-26 08:12:10 +0000992 if (tg3_flag(tp, 1SHOT_MSI))
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000993 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
994
Matt Carlsonf89f38b2010-02-12 14:47:07 +0000995 tp->coal_now |= tnapi->coal_now;
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000996 }
Matt Carlsonf19af9c2009-09-01 12:47:49 +0000997
998 /* Force an initial interrupt */
Joe Perches63c3a662011-04-26 08:12:10 +0000999 if (!tg3_flag(tp, TAGGED_STATUS) &&
Matt Carlsonf19af9c2009-09-01 12:47:49 +00001000 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1001 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1002 else
Matt Carlsonf89f38b2010-02-12 14:47:07 +00001003 tw32(HOSTCC_MODE, tp->coal_now);
1004
1005 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006}
1007
Matt Carlson17375d22009-08-28 14:02:18 +00001008static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
Michael Chan04237dd2005-04-25 15:17:17 -07001009{
Matt Carlson17375d22009-08-28 14:02:18 +00001010 struct tg3 *tp = tnapi->tp;
Matt Carlson898a56f2009-08-28 14:02:40 +00001011 struct tg3_hw_status *sblk = tnapi->hw_status;
Michael Chan04237dd2005-04-25 15:17:17 -07001012 unsigned int work_exists = 0;
1013
1014 /* check for phy events */
Joe Perches63c3a662011-04-26 08:12:10 +00001015 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
Michael Chan04237dd2005-04-25 15:17:17 -07001016 if (sblk->status & SD_STATUS_LINK_CHG)
1017 work_exists = 1;
1018 }
Matt Carlsonf891ea12012-04-24 13:37:01 +00001019
1020 /* check for TX work to do */
1021 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1022 work_exists = 1;
1023
1024 /* check for RX work to do */
1025 if (tnapi->rx_rcb_prod_idx &&
Matt Carlson8d9d7cf2009-09-01 13:19:05 +00001026 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
Michael Chan04237dd2005-04-25 15:17:17 -07001027 work_exists = 1;
1028
1029 return work_exists;
1030}
1031
Matt Carlson17375d22009-08-28 14:02:18 +00001032/* tg3_int_reenable
Michael Chan04237dd2005-04-25 15:17:17 -07001033 * similar to tg3_enable_ints, but it accurately determines whether there
1034 * is new work pending and can return without flushing the PIO write
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001035 * which reenables interrupts
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036 */
Matt Carlson17375d22009-08-28 14:02:18 +00001037static void tg3_int_reenable(struct tg3_napi *tnapi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038{
Matt Carlson17375d22009-08-28 14:02:18 +00001039 struct tg3 *tp = tnapi->tp;
1040
Matt Carlson898a56f2009-08-28 14:02:40 +00001041 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042 mmiowb();
1043
David S. Millerfac9b832005-05-18 22:46:34 -07001044 /* When doing tagged status, this work check is unnecessary.
1045 * The last_tag we write above tells the chip which piece of
1046 * work we've completed.
1047 */
Joe Perches63c3a662011-04-26 08:12:10 +00001048 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
Michael Chan04237dd2005-04-25 15:17:17 -07001049 tw32(HOSTCC_MODE, tp->coalesce_mode |
Matt Carlsonfd2ce372009-09-01 12:51:13 +00001050 HOSTCC_MODE_ENABLE | tnapi->coal_now);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051}
1052
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053static void tg3_switch_clocks(struct tg3 *tp)
1054{
Matt Carlsonf6eb9b12009-09-01 13:19:53 +00001055 u32 clock_ctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056 u32 orig_clock_ctrl;
1057
Joe Perches63c3a662011-04-26 08:12:10 +00001058 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
Michael Chan4cf78e42005-07-25 12:29:19 -07001059 return;
1060
Matt Carlsonf6eb9b12009-09-01 13:19:53 +00001061 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1062
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063 orig_clock_ctrl = clock_ctrl;
1064 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1065 CLOCK_CTRL_CLKRUN_OENABLE |
1066 0x1f);
1067 tp->pci_clock_ctrl = clock_ctrl;
1068
Joe Perches63c3a662011-04-26 08:12:10 +00001069 if (tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001071 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1072 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073 }
1074 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001075 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1076 clock_ctrl |
1077 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1078 40);
1079 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1080 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1081 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001083 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084}
1085
/* Maximum number of 10 usec polls for an MII transaction to complete
 * (~50 ms total) — see tg3_readphy()/tg3_writephy().
 */
#define PHY_BUSY_LOOPS	5000
1087
/* Read PHY register @reg over the MII management interface into *@val.
 *
 * Temporarily disables MAC autopolling (which would race with manual
 * MI commands), issues the MI read, and busy-waits up to
 * PHY_BUSY_LOOPS * 10 usec for completion.  Serialized against APE
 * firmware PHY accesses via tp->phy_ape_lock.
 *
 * Returns 0 on success, -EBUSY on timeout (with *val left 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, READ. */
	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle delay. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore autopolling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1140
/* Write @val to PHY register @reg over the MII management interface.
 *
 * FET-style PHYs ignore writes to MII_CTRL1000/MII_TG3_AUX_CTRL, so
 * those are silently skipped.  Otherwise mirrors tg3_readphy():
 * autopolling is paused, the MI write is issued, and completion is
 * polled for up to PHY_BUSY_LOOPS * 10 usec, all under
 * tp->phy_ape_lock.
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command frame: PHY address, register, data, WRITE. */
	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle delay. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore autopolling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1193
Matt Carlsonb0988c12011-04-20 07:57:39 +00001194static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1195{
1196 int err;
1197
1198 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1199 if (err)
1200 goto done;
1201
1202 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1203 if (err)
1204 goto done;
1205
1206 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1207 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1208 if (err)
1209 goto done;
1210
1211 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1212
1213done:
1214 return err;
1215}
1216
1217static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1218{
1219 int err;
1220
1221 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1222 if (err)
1223 goto done;
1224
1225 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1226 if (err)
1227 goto done;
1228
1229 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1230 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1231 if (err)
1232 goto done;
1233
1234 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1235
1236done:
1237 return err;
1238}
1239
1240static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1241{
1242 int err;
1243
1244 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1245 if (!err)
1246 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1247
1248 return err;
1249}
1250
1251static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1252{
1253 int err;
1254
1255 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1256 if (!err)
1257 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1258
1259 return err;
1260}
1261
Matt Carlson15ee95c2011-04-20 07:57:40 +00001262static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1263{
1264 int err;
1265
1266 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1267 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1268 MII_TG3_AUXCTL_SHDWSEL_MISC);
1269 if (!err)
1270 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1271
1272 return err;
1273}
1274
Matt Carlsonb4bd2922011-04-20 07:57:41 +00001275static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1276{
1277 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1278 set |= MII_TG3_AUXCTL_MISC_WREN;
1279
1280 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1281}
1282
Matt Carlson1d36ba42011-04-20 07:57:42 +00001283#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1284 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1285 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1286 MII_TG3_AUXCTL_ACTL_TX_6DB)
1287
1288#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1289 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1290 MII_TG3_AUXCTL_ACTL_TX_6DB);
1291
Matt Carlson95e28692008-05-25 23:44:14 -07001292static int tg3_bmcr_reset(struct tg3 *tp)
1293{
1294 u32 phy_control;
1295 int limit, err;
1296
1297 /* OK, reset it, and poll the BMCR_RESET bit until it
1298 * clears or we time out.
1299 */
1300 phy_control = BMCR_RESET;
1301 err = tg3_writephy(tp, MII_BMCR, phy_control);
1302 if (err != 0)
1303 return -EBUSY;
1304
1305 limit = 5000;
1306 while (limit--) {
1307 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1308 if (err != 0)
1309 return -EBUSY;
1310
1311 if ((phy_control & BMCR_RESET) == 0) {
1312 udelay(40);
1313 break;
1314 }
1315 udelay(10);
1316 }
Roel Kluind4675b52009-02-12 16:33:27 -08001317 if (limit < 0)
Matt Carlson95e28692008-05-25 23:44:14 -07001318 return -EBUSY;
1319
1320 return 0;
1321}
1322
Matt Carlson158d7ab2008-05-29 01:37:54 -07001323static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1324{
Francois Romieu3d165432009-01-19 16:56:50 -08001325 struct tg3 *tp = bp->priv;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001326 u32 val;
1327
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001328 spin_lock_bh(&tp->lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001329
1330 if (tg3_readphy(tp, reg, &val))
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001331 val = -EIO;
1332
1333 spin_unlock_bh(&tp->lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001334
1335 return val;
1336}
1337
1338static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1339{
Francois Romieu3d165432009-01-19 16:56:50 -08001340 struct tg3 *tp = bp->priv;
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001341 u32 ret = 0;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001342
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001343 spin_lock_bh(&tp->lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001344
1345 if (tg3_writephy(tp, reg, val))
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001346 ret = -EIO;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001347
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001348 spin_unlock_bh(&tp->lock);
1349
1350 return ret;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001351}
1352
/* phylib mii_bus reset hook; no bus-level reset is needed here. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1357
Matt Carlson9c61d6b2008-11-03 16:54:56 -08001358static void tg3_mdio_config_5785(struct tg3 *tp)
Matt Carlsona9daf362008-05-25 23:49:44 -07001359{
1360 u32 val;
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001361 struct phy_device *phydev;
Matt Carlsona9daf362008-05-25 23:49:44 -07001362
Matt Carlson3f0e3ad2009-11-02 14:24:36 +00001363 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001364 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
Matt Carlson6a443a02010-02-17 15:17:04 +00001365 case PHY_ID_BCM50610:
1366 case PHY_ID_BCM50610M:
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001367 val = MAC_PHYCFG2_50610_LED_MODES;
1368 break;
Matt Carlson6a443a02010-02-17 15:17:04 +00001369 case PHY_ID_BCMAC131:
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001370 val = MAC_PHYCFG2_AC131_LED_MODES;
1371 break;
Matt Carlson6a443a02010-02-17 15:17:04 +00001372 case PHY_ID_RTL8211C:
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001373 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1374 break;
Matt Carlson6a443a02010-02-17 15:17:04 +00001375 case PHY_ID_RTL8201E:
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001376 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1377 break;
1378 default:
Matt Carlsona9daf362008-05-25 23:49:44 -07001379 return;
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001380 }
1381
1382 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1383 tw32(MAC_PHYCFG2, val);
1384
1385 val = tr32(MAC_PHYCFG1);
Matt Carlsonbb85fbb2009-08-25 10:09:07 +00001386 val &= ~(MAC_PHYCFG1_RGMII_INT |
1387 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1388 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001389 tw32(MAC_PHYCFG1, val);
1390
1391 return;
1392 }
1393
Joe Perches63c3a662011-04-26 08:12:10 +00001394 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001395 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1396 MAC_PHYCFG2_FMODE_MASK_MASK |
1397 MAC_PHYCFG2_GMODE_MASK_MASK |
1398 MAC_PHYCFG2_ACT_MASK_MASK |
1399 MAC_PHYCFG2_QUAL_MASK_MASK |
1400 MAC_PHYCFG2_INBAND_ENABLE;
1401
1402 tw32(MAC_PHYCFG2, val);
Matt Carlsona9daf362008-05-25 23:49:44 -07001403
Matt Carlsonbb85fbb2009-08-25 10:09:07 +00001404 val = tr32(MAC_PHYCFG1);
1405 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1406 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
Joe Perches63c3a662011-04-26 08:12:10 +00001407 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1408 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
Matt Carlsona9daf362008-05-25 23:49:44 -07001409 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
Joe Perches63c3a662011-04-26 08:12:10 +00001410 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
Matt Carlsona9daf362008-05-25 23:49:44 -07001411 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1412 }
Matt Carlsonbb85fbb2009-08-25 10:09:07 +00001413 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1414 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1415 tw32(MAC_PHYCFG1, val);
Matt Carlsona9daf362008-05-25 23:49:44 -07001416
Matt Carlsona9daf362008-05-25 23:49:44 -07001417 val = tr32(MAC_EXT_RGMII_MODE);
1418 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1419 MAC_RGMII_MODE_RX_QUALITY |
1420 MAC_RGMII_MODE_RX_ACTIVITY |
1421 MAC_RGMII_MODE_RX_ENG_DET |
1422 MAC_RGMII_MODE_TX_ENABLE |
1423 MAC_RGMII_MODE_TX_LOWPWR |
1424 MAC_RGMII_MODE_TX_RESET);
Joe Perches63c3a662011-04-26 08:12:10 +00001425 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1426 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
Matt Carlsona9daf362008-05-25 23:49:44 -07001427 val |= MAC_RGMII_MODE_RX_INT_B |
1428 MAC_RGMII_MODE_RX_QUALITY |
1429 MAC_RGMII_MODE_RX_ACTIVITY |
1430 MAC_RGMII_MODE_RX_ENG_DET;
Joe Perches63c3a662011-04-26 08:12:10 +00001431 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
Matt Carlsona9daf362008-05-25 23:49:44 -07001432 val |= MAC_RGMII_MODE_TX_ENABLE |
1433 MAC_RGMII_MODE_TX_LOWPWR |
1434 MAC_RGMII_MODE_TX_RESET;
1435 }
1436 tw32(MAC_EXT_RGMII_MODE, val);
1437}
1438
Matt Carlson158d7ab2008-05-29 01:37:54 -07001439static void tg3_mdio_start(struct tg3 *tp)
1440{
Matt Carlson158d7ab2008-05-29 01:37:54 -07001441 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1442 tw32_f(MAC_MI_MODE, tp->mi_mode);
1443 udelay(80);
Matt Carlsona9daf362008-05-25 23:49:44 -07001444
Joe Perches63c3a662011-04-26 08:12:10 +00001445 if (tg3_flag(tp, MDIOBUS_INITED) &&
Matt Carlson9ea48182010-02-17 15:17:01 +00001446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1447 tg3_mdio_config_5785(tp);
1448}
1449
/* Determine the PHY's MDIO address, start the MDIO interface and, when
 * phylib is in use, allocate and register an mdio bus for this device.
 *
 * Returns 0 on success (including the no-phylib / already-initialized
 * cases), -ENOMEM if the bus cannot be allocated, -ENODEV if no usable
 * PHY is found, or the mdiobus_register() error code.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* On 5717+ parts the PHY address follows the PCI function
		 * number; serdes PHYs sit 7 addresses higher.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		/* 5717 A0 reports the serdes strap in a different register. */
		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	/* Bus id derived from the PCI bus/devfn so it is unique per device. */
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	/* Only probe the one address where we expect the PHY. */
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY quirks: select the MAC/PHY interface mode and the
	 * Broadcom phylib dev_flags appropriate for the attached part.
	 */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1554
1555static void tg3_mdio_fini(struct tg3 *tp)
1556{
Joe Perches63c3a662011-04-26 08:12:10 +00001557 if (tg3_flag(tp, MDIOBUS_INITED)) {
1558 tg3_flag_clear(tp, MDIOBUS_INITED);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001559 mdiobus_unregister(tp->mdio_bus);
1560 mdiobus_free(tp->mdio_bus);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001561 }
1562}
1563
/* tp->lock is held. */
/* Signal the on-chip firmware that a driver event is pending in the
 * NIC_SRAM_FW_CMD_* mailboxes, and record the time so that
 * tg3_wait_for_event_ack() can shorten its wait on the next event.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1575
1576#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1577
/* tp->lock is held. */
/* Wait for the firmware to clear GRC_RX_CPU_DRIVER_EVENT, i.e. to
 * acknowledge the previous event raised by tg3_generate_fw_event().
 * The wait budget is TG3_FW_EVENT_TIMEOUT_USEC measured from
 * tp->last_event_jiffies, so time already elapsed is not re-waited.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8us steps (loop body sleeps udelay(8)). */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1604
/* tp->lock is held. */
/* Snapshot the PHY's MII registers into four 32-bit words for the UMP
 * link-update firmware command.  Each word packs two 16-bit registers
 * (first register in the high half, second in the low half); a failed
 * register read leaves that half zero.  @data must point to at least
 * four u32 slots.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	/* word 0: BMCR / BMSR */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	/* word 1: local / link-partner advertisement */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	/* word 2: 1000BASE-T control/status; zero for MII serdes PHYs
	 * which have no 1000T registers.
	 */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	/* word 3: PHY address register in the high half only */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1639
/* tp->lock is held. */
/* Report the current PHY/link state to the management firmware (ASF)
 * via the FW command mailbox.  Only applies to 5780-class chips with
 * ASF enabled.  Protocol: wait for the previous event to be acked,
 * write command + length + data, then ring the event doorbell.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	/* 14 = payload length in bytes expected by the firmware for
	 * a link-update command -- TODO confirm against firmware spec.
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
1661
/* tp->lock is held. */
/* Ask the ASF firmware to pause.  Skipped when the APE manages the
 * firmware instead (ENABLE_APE).  Waits for the firmware to ack both
 * the previous outstanding event and the pause command itself.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1677
/* tp->lock is held. */
/* Post the pre-reset driver-state signature: always write the magic to
 * the firmware mailbox; with the new ASF handshake also record the
 * in-progress state for @kind, and notify the APE for INIT/SUSPEND.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	/* APE is told about INIT/SUSPEND here; SHUTDOWN is reported
	 * post-reset instead (see tg3_write_sig_post_reset).
	 */
	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
1710
/* tp->lock is held. */
/* Post the post-reset driver-state signature: with the new ASF
 * handshake record the completed state for @kind, and notify the APE
 * on shutdown (the pre-reset path handles INIT/SUSPEND).
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
1734
1735/* tp->lock is held. */
1736static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1737{
1738 if (tg3_flag(tp, ENABLE_ASF)) {
1739 switch (kind) {
1740 case RESET_KIND_INIT:
1741 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1742 DRV_STATE_START);
1743 break;
1744
1745 case RESET_KIND_SHUTDOWN:
1746 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1747 DRV_STATE_UNLOAD);
1748 break;
1749
1750 case RESET_KIND_SUSPEND:
1751 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1752 DRV_STATE_SUSPEND);
1753 break;
1754
1755 default:
1756 break;
1757 }
1758 }
1759}
1760
/* Poll for firmware boot completion after a reset.
 *
 * 5906: polls VCPU_STATUS for up to 20ms; returns -ENODEV on timeout.
 * Others: polls the firmware mailbox for up to ~1s for the inverted
 * magic value; a timeout here is NOT an error (some Sun onboard parts
 * ship without firmware) but is logged once.  Returns 0 otherwise.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Firmware writes back the bitwise NOT of the magic
		 * when its boot sequence is done.
		 */
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1804
Matt Carlson95e28692008-05-25 23:44:14 -07001805static void tg3_link_report(struct tg3 *tp)
1806{
1807 if (!netif_carrier_ok(tp->dev)) {
Joe Perches05dbe002010-02-17 19:44:19 +00001808 netif_info(tp, link, tp->dev, "Link is down\n");
Matt Carlson95e28692008-05-25 23:44:14 -07001809 tg3_ump_link_report(tp);
1810 } else if (netif_msg_link(tp)) {
Joe Perches05dbe002010-02-17 19:44:19 +00001811 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1812 (tp->link_config.active_speed == SPEED_1000 ?
1813 1000 :
1814 (tp->link_config.active_speed == SPEED_100 ?
1815 100 : 10)),
1816 (tp->link_config.active_duplex == DUPLEX_FULL ?
1817 "full" : "half"));
Matt Carlson95e28692008-05-25 23:44:14 -07001818
Joe Perches05dbe002010-02-17 19:44:19 +00001819 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1820 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1821 "on" : "off",
1822 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1823 "on" : "off");
Matt Carlson47007832011-04-20 07:57:43 +00001824
1825 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1826 netdev_info(tp->dev, "EEE is %s\n",
1827 tp->setlpicnt ? "enabled" : "disabled");
1828
Matt Carlson95e28692008-05-25 23:44:14 -07001829 tg3_ump_link_report(tp);
1830 }
1831}
1832
Matt Carlson95e28692008-05-25 23:44:14 -07001833static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1834{
1835 u16 miireg;
1836
Steve Glendinninge18ce342008-12-16 02:00:00 -08001837 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
Matt Carlson95e28692008-05-25 23:44:14 -07001838 miireg = ADVERTISE_1000XPAUSE;
Steve Glendinninge18ce342008-12-16 02:00:00 -08001839 else if (flow_ctrl & FLOW_CTRL_TX)
Matt Carlson95e28692008-05-25 23:44:14 -07001840 miireg = ADVERTISE_1000XPSE_ASYM;
Steve Glendinninge18ce342008-12-16 02:00:00 -08001841 else if (flow_ctrl & FLOW_CTRL_RX)
Matt Carlson95e28692008-05-25 23:44:14 -07001842 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1843 else
1844 miireg = 0;
1845
1846 return miireg;
1847}
1848
Matt Carlson95e28692008-05-25 23:44:14 -07001849static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1850{
1851 u8 cap = 0;
1852
Matt Carlsonf3791cd2011-11-21 15:01:17 +00001853 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1854 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1855 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1856 if (lcladv & ADVERTISE_1000XPAUSE)
1857 cap = FLOW_CTRL_RX;
1858 if (rmtadv & ADVERTISE_1000XPAUSE)
Steve Glendinninge18ce342008-12-16 02:00:00 -08001859 cap = FLOW_CTRL_TX;
Matt Carlson95e28692008-05-25 23:44:14 -07001860 }
1861
1862 return cap;
1863}
1864
/* Program the MAC RX/TX flow-control enables from either the
 * autoneg-resolved pause capabilities (@lcladv/@rmtadv advertisement
 * words) or, when pause autoneg is off, the statically configured
 * tp->link_config.flowctrl.  Caches the result in active_flowctrl and
 * touches the MAC_RX_MODE/MAC_TX_MODE registers only on change.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, the PHY device owns the autoneg setting. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause resolution;
		 * copper uses the generic MII helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1903
/* phylib link-change callback: resynchronize the MAC with the PHY's
 * current link/speed/duplex/pause state under tp->lock, then (outside
 * the lock) log the change if anything observable moved.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Recompute port mode / duplex bits from scratch. */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			/* 5785 at sub-gigabit speeds runs the port in
			 * MII mode.
			 */
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Pause is only meaningful at full duplex;
			 * gather both sides' advertisements.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs a longer slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Log outside the lock; tg3_link_report may sleep-print. */
	if (linkmesg)
		tg3_link_report(tp);
}
1987
/* Attach the MAC to its PHY through phylib and restrict the PHY's
 * advertised feature set to what this MAC supports.
 *
 * Returns 0 on success or if already connected, PTR_ERR() from
 * phy_connect() on attach failure, or -EINVAL for an unsupported
 * PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices fall through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2035
/* (Re)start the attached PHY.  When resuming from low-power state,
 * first restore the PHY's speed/duplex/autoneg/advertising from the
 * saved link_config, then start the PHY state machine and kick off
 * autonegotiation.  No-op when no PHY is connected.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2057
2058static void tg3_phy_stop(struct tg3 *tp)
2059{
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002060 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002061 return;
2062
Matt Carlson3f0e3ad2009-11-02 14:24:36 +00002063 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002064}
2065
2066static void tg3_phy_fini(struct tg3 *tp)
2067{
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002068 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
Matt Carlson3f0e3ad2009-11-02 14:24:36 +00002069 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002070 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002071 }
2072}
2073
/* Enable external loopback in the PHY's auxiliary control shadow
 * register.  FET-style PHYs are skipped (no such register).  The
 * BCM5401 cannot be read-modify-written, so a fixed value with the
 * loopback bit is written instead.  Returns 0 or a PHY access error.
 */
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		/* 0x4c20 = base AUXCTL value for the 5401 -- presumably
		 * from Broadcom documentation; TODO confirm.
		 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
2103
/* Toggle auto power-down (APD) on FET-style PHYs via the shadow
 * register window: open the window with MII_TG3_FET_SHADOW_EN,
 * read-modify-write the APD bit in AUXSTAT2, then restore the
 * original TEST register value to close the window.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Close the shadow window / restore original value. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
2123
/* Toggle the PHY's auto power-down feature.  Not applicable to
 * pre-5705 chips, nor to 5717+ parts with an MII serdes PHY.  FET
 * PHYs delegate to tg3_phy_fet_toggle_apd(); other PHYs are set up
 * through two writes to the MISC shadow register (SCR5 timing/clock
 * bits, then the APD enable + wake timer).
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* 5784 with APD enabled leaves DLLAPD clear; everything else
	 * sets it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2158
/* Toggle automatic MDI/MDI-X crossover detection on copper PHYs.
 * Skipped on pre-5705 chips and on serdes links (no crossover).
 * FET PHYs flip the MDIX bit in the MISCCTRL shadow register; other
 * PHYs read-modify-write the FORCE_AMDIX bit via auxctl MISC.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow register window. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore TEST register / close the window. */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2199
/* Enable the PHY's "ethernet wirespeed" feature (downshift to a lower
 * speed on marginal cabling) via a read-modify-write of the auxctl
 * MISC shadow register, unless the PHY is flagged as not supporting it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
2213
/* Program PHY DSP tuning values unpacked from the chip's OTP word
 * (tp->phy_otp).  Each field is masked/shifted out of the OTP value
 * and written to the corresponding DSP register, bracketed by
 * enabling/disabling SMDSP access through the auxctl register.
 * No-op when no OTP data was read, or if SMDSP enable fails.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Bail out if we cannot get SMDSP register access. */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2250
/* Re-evaluate Energy Efficient Ethernet after a link change.  With a
 * full-duplex 100/1000 autoneg link, program the LPI exit timer and,
 * if the link partner resolved EEE, arm tp->setlpicnt (counted down
 * elsewhere before LPI is actually enabled).  Otherwise clear the
 * TAP26 DSP register (link up only) and disable LPI in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Read the clause-45 EEE resolution status. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2293
/* Enable EEE low-power-idle mode in the CPMU, applying a PHY DSP fixup
 * first on the chip families that require it at gigabit speed.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	/* 5717/5719/57765-class parts get a TAP26 DSP write (under the
	 * SMDSP clock) before LPI is turned on at 1000 Mbps.
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2312
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313static int tg3_wait_macro_done(struct tg3 *tp)
2314{
2315 int limit = 100;
2316
2317 while (limit--) {
2318 u32 tmp32;
2319
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00002320 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 if ((tmp32 & 0x1000) == 0)
2322 break;
2323 }
2324 }
Roel Kluind4675b52009-02-12 16:33:27 -08002325 if (limit < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 return -EBUSY;
2327
2328 return 0;
2329}
2330
/* Write a fixed test pattern into each of the four PHY DSP channels and
 * read it back to verify the PHY's internal memory.  On any readback
 * mismatch or macro timeout, *resetp is set so the caller performs
 * another PHY reset and retries.  Returns 0 when the pattern verifies,
 * -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address the channel (0x2000 stride per channel) and
		 * start a write macro.
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the channel and switch to readback mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Pattern words come back as low/high pairs; only 15 bits
		 * of the low word and 4 bits of the high word are valid.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the hardware-specific
				 * recovery writes before failing.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2396
2397static int tg3_phy_reset_chanpat(struct tg3 *tp)
2398{
2399 int chan;
2400
2401 for (chan = 0; chan < 4; chan++) {
2402 int i;
2403
2404 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2405 (chan * 0x2000) | 0x0200);
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00002406 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 for (i = 0; i < 6; i++)
2408 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00002409 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 if (tg3_wait_macro_done(tp))
2411 return -EBUSY;
2412 }
2413
2414 return 0;
2415}
2416
/* Workaround PHY reset for 5703/5704/5705 chips: reset the PHY, then
 * verify its internal DSP memory with a test pattern, retrying the whole
 * sequence up to ten times.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	/* Restore the saved master/slave configuration.
	 * NOTE(review): if every retry bailed out via "continue" before the
	 * MII_CTRL1000 read succeeded, phy9_orig is written back here
	 * uninitialized — verify the retry loop cannot be exhausted that way.
	 */
	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2483
/* Mark the link up: notify the net stack and cache the state in tp. */
static void tg3_carrier_on(struct tg3 *tp)
{
	netif_carrier_on(tp->dev);
	tp->link_up = true;
}
2489
/* Mark the link down: notify the net stack and cache the state in tp. */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2495
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Resets the PHY and then re-applies the chip- and PHY-specific errata
 * workarounds (OTP values, DSP fixups, jumbo-frame settings, APD, MDI-X,
 * wirespeed).  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the 5906 EPHY out of IDDQ low-power mode first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice; a failure of either read aborts the reset. */
	err = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		tg3_carrier_off(tp);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 use a dedicated reset-and-verify sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX), temporarily clear the 10MB-RX-only CPMU
	 * setting around the BMCR reset and restore it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX / 5761-AX: drop the 12.5MHz 1000MB MAC clock setting. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* 5717+ MII serdes PHYs skip the remaining copper-PHY fixups. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-PHY errata fixups, each done under the SMDSP clock. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* Written twice intentionally. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2636
/* GPIO power-handshake messages.  Each of the (up to four) PCI functions
 * of a device owns a 4-bit slot in a shared status word; the *_ALL_*
 * masks aggregate one message bit across all four slots.
 */
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))

/* Replace this function's 4-bit slot in the shared status word with
 * newstat and return the resulting aggregate status of all functions.
 * 5717/5719 keep the word in an APE register, other chips in the CPMU
 * driver-status register.
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
2675
/* Switch the board's power source to Vmain (main power).  On the
 * 5717/5719/5720 families the GPIOs are shared between PCI functions, so
 * driver presence is announced through the shared status word under the
 * APE GPIO lock.  Returns 0 on success or -EIO if the lock can't be had.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
2700
/* Remain on Vmain when powering down: pulse GPIO1 high-low-high with a
 * power-switch delay after each step.  Skipped on non-NIC boards and on
 * 5700/5701 (their GPIOs are wired differently).
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2724
/* Switch the board's power source to Vaux (auxiliary power), e.g. so
 * WoL/ASF can keep the device alive while the host sleeps.  The GPIO
 * sequence is chip-family specific; each write waits
 * TG3_GRC_LCLCTL_PWRSW_DELAY for the power switch to settle.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write driving GPIO0/1 outputs. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
Matt Carlson6f5c8f832011-07-13 09:27:31 +00002801
/* 5717-family variant of tg3_frob_aux_power(): the Vaux/Vmain decision is
 * shared between PCI functions, so this function's vote is published via
 * the shared status word while holding the APE GPIO lock.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	/* If any other function still reports a present driver, leave the
	 * power-source switch to it.
	 */
	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2826
/* Choose between Vaux and Vmain when the device goes down, taking into
 * account the WoL/ASF needs of this function and of a possible peer
 * function sharing the board (tp->pdev_peer).  include_wol selects
 * whether WoL enablement counts toward needing Vaux.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	/* Multi-function chips use the shared-status handshake instead. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* Peer's driver is still initialized; it will
			 * manage the power source itself.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2870
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002871static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2872{
2873 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2874 return 1;
Matt Carlson79eb6902010-02-17 15:17:03 +00002875 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002876 if (speed != SPEED_10)
2877 return 1;
2878 } else if (speed == SPEED_10)
2879 return 1;
2880
2881 return 0;
2882}
2883
/* Power the PHY down for a low-power state.  The sequence depends on the
 * PHY type (serdes, 5906 EPHY, FET-style, copper) and is skipped entirely
 * on chip revisions whose errata forbid a full PHY power-down.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		/* Put the 5906 EPHY into IDDQ low-power mode. */
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Set the standby-power-down bit through the FET
			 * shadow register window, then close the window.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	/* 5784-AX / 5761-AX: force the 12.5MHz 1000MB MAC clock first. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2958
Matt Carlson3f007892008-11-03 16:51:36 -08002959/* tp->lock is held. */
Matt Carlsonffbcfed2009-02-25 14:24:28 +00002960static int tg3_nvram_lock(struct tg3 *tp)
2961{
Joe Perches63c3a662011-04-26 08:12:10 +00002962 if (tg3_flag(tp, NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00002963 int i;
2964
2965 if (tp->nvram_lock_cnt == 0) {
2966 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2967 for (i = 0; i < 8000; i++) {
2968 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2969 break;
2970 udelay(20);
2971 }
2972 if (i == 8000) {
2973 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2974 return -ENODEV;
2975 }
2976 }
2977 tp->nvram_lock_cnt++;
2978 }
2979 return 0;
2980}
2981
2982/* tp->lock is held. */
2983static void tg3_nvram_unlock(struct tg3 *tp)
2984{
Joe Perches63c3a662011-04-26 08:12:10 +00002985 if (tg3_flag(tp, NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00002986 if (tp->nvram_lock_cnt > 0)
2987 tp->nvram_lock_cnt--;
2988 if (tp->nvram_lock_cnt == 0)
2989 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2990 }
2991}
2992
2993/* tp->lock is held. */
2994static void tg3_enable_nvram_access(struct tg3 *tp)
2995{
Joe Perches63c3a662011-04-26 08:12:10 +00002996 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00002997 u32 nvaccess = tr32(NVRAM_ACCESS);
2998
2999 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3000 }
3001}
3002
3003/* tp->lock is held. */
3004static void tg3_disable_nvram_access(struct tg3 *tp)
3005{
Joe Perches63c3a662011-04-26 08:12:10 +00003006 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003007 u32 nvaccess = tr32(NVRAM_ACCESS);
3008
3009 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3010 }
3011}
3012
/* Read one 32-bit word through the legacy EEPROM interface (used on parts
 * without the NVRAM block).  offset must be word-aligned and within
 * EEPROM_ADDR_ADDR_MASK.  Completion is polled for up to ~1s.  Returns 0
 * on success, -EINVAL on a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits, then program address and kick off
	 * the read.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3052
3053#define NVRAM_CMD_TIMEOUT 10000
3054
3055static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3056{
3057 int i;
3058
3059 tw32(NVRAM_CMD, nvram_cmd);
3060 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3061 udelay(10);
3062 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3063 udelay(10);
3064 break;
3065 }
3066 }
3067
3068 if (i == NVRAM_CMD_TIMEOUT)
3069 return -EBUSY;
3070
3071 return 0;
3072}
3073
3074static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3075{
Joe Perches63c3a662011-04-26 08:12:10 +00003076 if (tg3_flag(tp, NVRAM) &&
3077 tg3_flag(tp, NVRAM_BUFFERED) &&
3078 tg3_flag(tp, FLASH) &&
3079 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003080 (tp->nvram_jedecnum == JEDEC_ATMEL))
3081
3082 addr = ((addr / tp->nvram_pagesize) <<
3083 ATMEL_AT45DB0X1B_PAGE_POS) +
3084 (addr % tp->nvram_pagesize);
3085
3086 return addr;
3087}
3088
3089static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3090{
Joe Perches63c3a662011-04-26 08:12:10 +00003091 if (tg3_flag(tp, NVRAM) &&
3092 tg3_flag(tp, NVRAM_BUFFERED) &&
3093 tg3_flag(tp, FLASH) &&
3094 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003095 (tp->nvram_jedecnum == JEDEC_ATMEL))
3096
3097 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3098 tp->nvram_pagesize) +
3099 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3100
3101 return addr;
3102}
3103
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Reads one 32-bit word at the given logical offset.  Handles address
 * translation, arbitration locking, and access enable/disable around the
 * actual read command.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Parts without the NVRAM block fall back to the EEPROM path. */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3141
Matt Carlsona9dc5292009-02-25 14:25:30 +00003142/* Ensures NVRAM data is in bytestream format. */
3143static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003144{
3145 u32 v;
Matt Carlsona9dc5292009-02-25 14:25:30 +00003146 int res = tg3_nvram_read(tp, offset, &v);
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003147 if (!res)
Matt Carlsona9dc5292009-02-25 14:25:30 +00003148 *val = cpu_to_be32(v);
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003149 return res;
3150}
3151
Matt Carlsondbe9b922012-02-13 10:20:09 +00003152static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3153 u32 offset, u32 len, u8 *buf)
3154{
3155 int i, j, rc = 0;
3156 u32 val;
3157
3158 for (i = 0; i < len; i += 4) {
3159 u32 addr;
3160 __be32 data;
3161
3162 addr = offset + i;
3163
3164 memcpy(&data, buf + i, 4);
3165
3166 /*
3167 * The SEEPROM interface expects the data to always be opposite
3168 * the native endian format. We accomplish this by reversing
3169 * all the operations that would have been performed on the
3170 * data from a call to tg3_nvram_read_be32().
3171 */
3172 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3173
3174 val = tr32(GRC_EEPROM_ADDR);
3175 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3176
3177 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3178 EEPROM_ADDR_READ);
3179 tw32(GRC_EEPROM_ADDR, val |
3180 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3181 (addr & EEPROM_ADDR_ADDR_MASK) |
3182 EEPROM_ADDR_START |
3183 EEPROM_ADDR_WRITE);
3184
3185 for (j = 0; j < 1000; j++) {
3186 val = tr32(GRC_EEPROM_ADDR);
3187
3188 if (val & EEPROM_ADDR_COMPLETE)
3189 break;
3190 msleep(1);
3191 }
3192 if (!(val & EEPROM_ADDR_COMPLETE)) {
3193 rc = -EBUSY;
3194 break;
3195 }
3196 }
3197
3198 return rc;
3199}
3200
3201/* offset and length are dword aligned */
3202static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3203 u8 *buf)
3204{
3205 int ret = 0;
3206 u32 pagesize = tp->nvram_pagesize;
3207 u32 pagemask = pagesize - 1;
3208 u32 nvram_cmd;
3209 u8 *tmp;
3210
3211 tmp = kmalloc(pagesize, GFP_KERNEL);
3212 if (tmp == NULL)
3213 return -ENOMEM;
3214
3215 while (len) {
3216 int j;
3217 u32 phy_addr, page_off, size;
3218
3219 phy_addr = offset & ~pagemask;
3220
3221 for (j = 0; j < pagesize; j += 4) {
3222 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3223 (__be32 *) (tmp + j));
3224 if (ret)
3225 break;
3226 }
3227 if (ret)
3228 break;
3229
3230 page_off = offset & pagemask;
3231 size = pagesize;
3232 if (len < size)
3233 size = len;
3234
3235 len -= size;
3236
3237 memcpy(tmp + page_off, buf, size);
3238
3239 offset = offset + (pagesize - page_off);
3240
3241 tg3_enable_nvram_access(tp);
3242
3243 /*
3244 * Before we can erase the flash page, we need
3245 * to issue a special "write enable" command.
3246 */
3247 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3248
3249 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3250 break;
3251
3252 /* Erase the target page */
3253 tw32(NVRAM_ADDR, phy_addr);
3254
3255 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3256 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3257
3258 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3259 break;
3260
3261 /* Issue another write enable to start the write. */
3262 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3263
3264 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3265 break;
3266
3267 for (j = 0; j < pagesize; j += 4) {
3268 __be32 data;
3269
3270 data = *((__be32 *) (tmp + j));
3271
3272 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3273
3274 tw32(NVRAM_ADDR, phy_addr + j);
3275
3276 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3277 NVRAM_CMD_WR;
3278
3279 if (j == 0)
3280 nvram_cmd |= NVRAM_CMD_FIRST;
3281 else if (j == (pagesize - 4))
3282 nvram_cmd |= NVRAM_CMD_LAST;
3283
3284 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3285 if (ret)
3286 break;
3287 }
3288 if (ret)
3289 break;
3290 }
3291
3292 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3293 tg3_nvram_exec_cmd(tp, nvram_cmd);
3294
3295 kfree(tmp);
3296
3297 return ret;
3298}
3299
/* offset and length are dword aligned */
/* Write 'len' bytes from 'buf' to buffered flash / EEPROM one word at
 * a time, setting NVRAM_CMD_FIRST at each page start (or the first
 * word) and NVRAM_CMD_LAST at each page end (or the final word).
 * Returns 0 on success or the error from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* 57765+ flash auto-increments the address within a
		 * burst; only reload NVRAM_ADDR when starting a burst
		 * or on parts without that behavior.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST parts need an explicit write-enable before
		 * each burst.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3354
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily drops the external
 * write-protect GPIO, enables write access, and dispatches to the
 * SEEPROM, buffered, or unbuffered helper depending on the device's
 * NVRAM flags.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* De-assert the write-protect GPIO for the duration of the write. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes in GRC mode for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert write protect. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
3404
Matt Carlson997b4f12011-08-31 11:44:53 +00003405#define RX_CPU_SCRATCH_BASE 0x30000
3406#define RX_CPU_SCRATCH_SIZE 0x04000
3407#define TX_CPU_SCRATCH_BASE 0x34000
3408#define TX_CPU_SCRATCH_SIZE 0x04000
3409
/* tp->lock is held. */
/* Halt the on-chip RX or TX CPU ('offset' is RX_CPU_BASE or
 * TX_CPU_BASE).  5906 parts are halted through the VCPU control
 * register instead.  Returns 0 on success or -ENODEV if the CPU does
 * not report halted after 10000 attempts.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ parts have no TX CPU (TSO is done elsewhere). */
	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* One final posted halt — kept for the RX CPU only;
		 * presumably a historical chip quirk (TODO confirm).
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3454
/* Describes a firmware image to download into CPU scratch memory. */
struct fw_info {
	unsigned int fw_base;	/* start address from the blob header; also the PC the CPU is started at */
	unsigned int fw_len;	/* image length in bytes (text through bss) */
	const __be32 *fw_data;	/* firmware words, big-endian in the blob */
};
3460
/* tp->lock is held. */
/* Halt the given on-chip CPU, zero its scratch memory, and copy the
 * firmware image described by 'info' into it.  The image is placed at
 * (info->fw_base & 0xffff) within the scratch window.  Returns 0 on
 * success or a negative errno from the halt step.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ parts use the memory-window write path; older parts use
	 * indirect register writes.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Clear all of scratch memory before loading the image. */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3506
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware (tp->fw) into both the RX and
 * TX CPUs, then start only the RX CPU and verify its PC latched the
 * firmware start address.  Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Retry setting the PC a few times before giving up. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);

	return 0;
}
3561
/* tp->lock is held. */
/* Download and start the TSO firmware from tp->fw.  A no-op for parts
 * with hardware TSO.  On 5705 the firmware runs on the RX CPU using
 * the mbuf pool as scratch; otherwise it runs on the TX CPU.  Returns
 * 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware TSO parts need no firmware assist. */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Retry setting the PC a few times before giving up. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}
3625
3626
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003627/* tp->lock is held. */
Matt Carlson3f007892008-11-03 16:51:36 -08003628static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3629{
3630 u32 addr_high, addr_low;
3631 int i;
3632
3633 addr_high = ((tp->dev->dev_addr[0] << 8) |
3634 tp->dev->dev_addr[1]);
3635 addr_low = ((tp->dev->dev_addr[2] << 24) |
3636 (tp->dev->dev_addr[3] << 16) |
3637 (tp->dev->dev_addr[4] << 8) |
3638 (tp->dev->dev_addr[5] << 0));
3639 for (i = 0; i < 4; i++) {
3640 if (i == 1 && skip_mac_1)
3641 continue;
3642 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3643 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3644 }
3645
3646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3647 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3648 for (i = 0; i < 12; i++) {
3649 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3650 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3651 }
3652 }
3653
3654 addr_high = (tp->dev->dev_addr[0] +
3655 tp->dev->dev_addr[1] +
3656 tp->dev->dev_addr[2] +
3657 tp->dev->dev_addr[3] +
3658 tp->dev->dev_addr[4] +
3659 tp->dev->dev_addr[5]) &
3660 TX_BACKOFF_SEED_MASK;
3661 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3662}
3663
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003664static void tg3_enable_register_access(struct tg3 *tp)
3665{
3666 /*
3667 * Make sure register accesses (indirect or otherwise) will function
3668 * correctly.
3669 */
3670 pci_write_config_dword(tp->pdev,
3671 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3672}
3673
/* Bring the device to PCI D0 and switch its power source from Vaux to
 * Vmain.  Returns 0 on success or the pci_set_power_state() error.
 */
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
3690
Matt Carlson4b409522012-02-13 10:20:11 +00003691static int tg3_setup_phy(struct tg3 *, int);
3692
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003693static int tg3_power_down_prepare(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694{
3695 u32 misc_host_ctrl;
Matt Carlson0a459aa2008-11-03 16:54:15 -08003696 bool device_should_wake, do_low_power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003698 tg3_enable_register_access(tp);
Matt Carlson5e7dfd02008-11-21 17:18:16 -08003699
3700 /* Restore the CLKREQ setting. */
Jiang Liu0f49bfb2012-08-20 13:28:20 -06003701 if (tg3_flag(tp, CLKREQ_BUG))
3702 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3703 PCI_EXP_LNKCTL_CLKREQ_EN);
Matt Carlson5e7dfd02008-11-21 17:18:16 -08003704
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3706 tw32(TG3PCI_MISC_HOST_CTRL,
3707 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3708
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003709 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
Joe Perches63c3a662011-04-26 08:12:10 +00003710 tg3_flag(tp, WOL_ENABLE);
Matt Carlson05ac4cb2008-11-03 16:53:46 -08003711
Joe Perches63c3a662011-04-26 08:12:10 +00003712 if (tg3_flag(tp, USE_PHYLIB)) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08003713 do_low_power = false;
Matt Carlsonf07e9af2010-08-02 11:26:07 +00003714 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
Matt Carlson80096062010-08-02 11:26:06 +00003715 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003716 struct phy_device *phydev;
Matt Carlson0a459aa2008-11-03 16:54:15 -08003717 u32 phyid, advertising;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003718
Matt Carlson3f0e3ad2009-11-02 14:24:36 +00003719 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003720
Matt Carlson80096062010-08-02 11:26:06 +00003721 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003722
Matt Carlsonc6700ce2012-02-13 15:20:15 +00003723 tp->link_config.speed = phydev->speed;
3724 tp->link_config.duplex = phydev->duplex;
3725 tp->link_config.autoneg = phydev->autoneg;
3726 tp->link_config.advertising = phydev->advertising;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003727
3728 advertising = ADVERTISED_TP |
3729 ADVERTISED_Pause |
3730 ADVERTISED_Autoneg |
3731 ADVERTISED_10baseT_Half;
3732
Joe Perches63c3a662011-04-26 08:12:10 +00003733 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3734 if (tg3_flag(tp, WOL_SPEED_100MB))
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003735 advertising |=
3736 ADVERTISED_100baseT_Half |
3737 ADVERTISED_100baseT_Full |
3738 ADVERTISED_10baseT_Full;
3739 else
3740 advertising |= ADVERTISED_10baseT_Full;
3741 }
3742
3743 phydev->advertising = advertising;
3744
3745 phy_start_aneg(phydev);
Matt Carlson0a459aa2008-11-03 16:54:15 -08003746
3747 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
Matt Carlson6a443a02010-02-17 15:17:04 +00003748 if (phyid != PHY_ID_BCMAC131) {
3749 phyid &= PHY_BCM_OUI_MASK;
3750 if (phyid == PHY_BCM_OUI_1 ||
3751 phyid == PHY_BCM_OUI_2 ||
3752 phyid == PHY_BCM_OUI_3)
Matt Carlson0a459aa2008-11-03 16:54:15 -08003753 do_low_power = true;
3754 }
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003755 }
Matt Carlsondd477002008-05-25 23:45:58 -07003756 } else {
Matt Carlson20232762008-12-21 20:18:56 -08003757 do_low_power = true;
Matt Carlson0a459aa2008-11-03 16:54:15 -08003758
Matt Carlsonc6700ce2012-02-13 15:20:15 +00003759 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
Matt Carlson80096062010-08-02 11:26:06 +00003760 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761
Matt Carlson2855b9f2012-02-13 15:20:14 +00003762 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
Matt Carlsondd477002008-05-25 23:45:58 -07003763 tg3_setup_phy(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764 }
3765
Michael Chanb5d37722006-09-27 16:06:21 -07003766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3767 u32 val;
3768
3769 val = tr32(GRC_VCPU_EXT_CTRL);
3770 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
Joe Perches63c3a662011-04-26 08:12:10 +00003771 } else if (!tg3_flag(tp, ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08003772 int i;
3773 u32 val;
3774
3775 for (i = 0; i < 200; i++) {
3776 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3777 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3778 break;
3779 msleep(1);
3780 }
3781 }
Joe Perches63c3a662011-04-26 08:12:10 +00003782 if (tg3_flag(tp, WOL_CAP))
Gary Zambranoa85feb82007-05-05 11:52:19 -07003783 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3784 WOL_DRV_STATE_SHUTDOWN |
3785 WOL_DRV_WOL |
3786 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08003787
Matt Carlson05ac4cb2008-11-03 16:53:46 -08003788 if (device_should_wake) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789 u32 mac_mode;
3790
Matt Carlsonf07e9af2010-08-02 11:26:07 +00003791 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
Matt Carlsonb4bd2922011-04-20 07:57:41 +00003792 if (do_low_power &&
3793 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3794 tg3_phy_auxctl_write(tp,
3795 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3796 MII_TG3_AUXCTL_PCTL_WOL_EN |
3797 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3798 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
Matt Carlsondd477002008-05-25 23:45:58 -07003799 udelay(40);
3800 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801
Matt Carlsonf07e9af2010-08-02 11:26:07 +00003802 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
Michael Chan3f7045c2006-09-27 16:02:29 -07003803 mac_mode = MAC_MODE_PORT_MODE_GMII;
3804 else
3805 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003807 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3808 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3809 ASIC_REV_5700) {
Joe Perches63c3a662011-04-26 08:12:10 +00003810 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003811 SPEED_100 : SPEED_10;
3812 if (tg3_5700_link_polarity(tp, speed))
3813 mac_mode |= MAC_MODE_LINK_POLARITY;
3814 else
3815 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3816 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817 } else {
3818 mac_mode = MAC_MODE_PORT_MODE_TBI;
3819 }
3820
Joe Perches63c3a662011-04-26 08:12:10 +00003821 if (!tg3_flag(tp, 5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822 tw32(MAC_LED_CTRL, tp->led_ctrl);
3823
Matt Carlson05ac4cb2008-11-03 16:53:46 -08003824 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
Joe Perches63c3a662011-04-26 08:12:10 +00003825 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3826 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
Matt Carlson05ac4cb2008-11-03 16:53:46 -08003827 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828
Joe Perches63c3a662011-04-26 08:12:10 +00003829 if (tg3_flag(tp, ENABLE_APE))
Matt Carlsond2394e6b2010-11-24 08:31:47 +00003830 mac_mode |= MAC_MODE_APE_TX_EN |
3831 MAC_MODE_APE_RX_EN |
3832 MAC_MODE_TDE_ENABLE;
Matt Carlson3bda1252008-08-15 14:08:22 -07003833
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834 tw32_f(MAC_MODE, mac_mode);
3835 udelay(100);
3836
3837 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3838 udelay(10);
3839 }
3840
Joe Perches63c3a662011-04-26 08:12:10 +00003841 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3843 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3844 u32 base_val;
3845
3846 base_val = tp->pci_clock_ctrl;
3847 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3848 CLOCK_CTRL_TXCLK_DISABLE);
3849
Michael Chanb401e9e2005-12-19 16:27:04 -08003850 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3851 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Joe Perches63c3a662011-04-26 08:12:10 +00003852 } else if (tg3_flag(tp, 5780_CLASS) ||
3853 tg3_flag(tp, CPMU_PRESENT) ||
Matt Carlson6ff6f812011-05-19 12:12:54 +00003854 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan4cf78e42005-07-25 12:29:19 -07003855 /* do nothing */
Joe Perches63c3a662011-04-26 08:12:10 +00003856 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857 u32 newbits1, newbits2;
3858
3859 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3860 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3861 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3862 CLOCK_CTRL_TXCLK_DISABLE |
3863 CLOCK_CTRL_ALTCLK);
3864 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
Joe Perches63c3a662011-04-26 08:12:10 +00003865 } else if (tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003866 newbits1 = CLOCK_CTRL_625_CORE;
3867 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3868 } else {
3869 newbits1 = CLOCK_CTRL_ALTCLK;
3870 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3871 }
3872
Michael Chanb401e9e2005-12-19 16:27:04 -08003873 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3874 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875
Michael Chanb401e9e2005-12-19 16:27:04 -08003876 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3877 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878
Joe Perches63c3a662011-04-26 08:12:10 +00003879 if (!tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880 u32 newbits3;
3881
3882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3884 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3885 CLOCK_CTRL_TXCLK_DISABLE |
3886 CLOCK_CTRL_44MHZ_CORE);
3887 } else {
3888 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3889 }
3890
Michael Chanb401e9e2005-12-19 16:27:04 -08003891 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3892 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 }
3894 }
3895
Joe Perches63c3a662011-04-26 08:12:10 +00003896 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
Matt Carlson0a459aa2008-11-03 16:54:15 -08003897 tg3_power_down_phy(tp, do_low_power);
Michael Chan6921d202005-12-13 21:15:53 -08003898
Matt Carlsoncd0d7222011-07-13 09:27:33 +00003899 tg3_frob_aux_power(tp, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900
3901 /* Workaround for unstable PLL clock */
3902 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3903 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3904 u32 val = tr32(0x7d00);
3905
3906 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3907 tw32(0x7d00, val);
Joe Perches63c3a662011-04-26 08:12:10 +00003908 if (!tg3_flag(tp, ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08003909 int err;
3910
3911 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08003913 if (!err)
3914 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08003915 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916 }
3917
Michael Chanbbadf502006-04-06 21:46:34 -07003918 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3919
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920 return 0;
3921}
3922
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003923static void tg3_power_down(struct tg3 *tp)
3924{
3925 tg3_power_down_prepare(tp);
3926
Joe Perches63c3a662011-04-26 08:12:10 +00003927 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003928 pci_set_power_state(tp->pdev, PCI_D3hot);
3929}
3930
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3932{
3933 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3934 case MII_TG3_AUX_STAT_10HALF:
3935 *speed = SPEED_10;
3936 *duplex = DUPLEX_HALF;
3937 break;
3938
3939 case MII_TG3_AUX_STAT_10FULL:
3940 *speed = SPEED_10;
3941 *duplex = DUPLEX_FULL;
3942 break;
3943
3944 case MII_TG3_AUX_STAT_100HALF:
3945 *speed = SPEED_100;
3946 *duplex = DUPLEX_HALF;
3947 break;
3948
3949 case MII_TG3_AUX_STAT_100FULL:
3950 *speed = SPEED_100;
3951 *duplex = DUPLEX_FULL;
3952 break;
3953
3954 case MII_TG3_AUX_STAT_1000HALF:
3955 *speed = SPEED_1000;
3956 *duplex = DUPLEX_HALF;
3957 break;
3958
3959 case MII_TG3_AUX_STAT_1000FULL:
3960 *speed = SPEED_1000;
3961 *duplex = DUPLEX_FULL;
3962 break;
3963
3964 default:
Matt Carlsonf07e9af2010-08-02 11:26:07 +00003965 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
Michael Chan715116a2006-09-27 16:09:25 -07003966 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3967 SPEED_10;
3968 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3969 DUPLEX_HALF;
3970 break;
3971 }
Matt Carlsone7405222012-02-13 15:20:16 +00003972 *speed = SPEED_UNKNOWN;
3973 *duplex = DUPLEX_UNKNOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003975 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976}
3977
Matt Carlson42b64a42011-05-19 12:12:49 +00003978static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979{
Matt Carlson42b64a42011-05-19 12:12:49 +00003980 int err = 0;
3981 u32 val, new_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003982
Matt Carlson42b64a42011-05-19 12:12:49 +00003983 new_adv = ADVERTISE_CSMA;
Hiroaki SHIMODA202ff1c2011-11-22 04:05:41 +00003984 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
Matt Carlsonf88788f2011-12-14 11:10:00 +00003985 new_adv |= mii_advertise_flowctrl(flowctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986
Matt Carlson42b64a42011-05-19 12:12:49 +00003987 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3988 if (err)
3989 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990
Matt Carlson4f272092011-12-14 11:09:57 +00003991 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3992 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003993
Matt Carlson4f272092011-12-14 11:09:57 +00003994 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3995 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3996 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003997
Matt Carlson4f272092011-12-14 11:09:57 +00003998 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3999 if (err)
4000 goto done;
4001 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08004002
Matt Carlson42b64a42011-05-19 12:12:49 +00004003 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4004 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005
Matt Carlson42b64a42011-05-19 12:12:49 +00004006 tw32(TG3_CPMU_EEE_MODE,
4007 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08004008
Matt Carlson42b64a42011-05-19 12:12:49 +00004009 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
4010 if (!err) {
4011 u32 err2;
Matt Carlson52b02d02010-10-14 10:37:41 +00004012
Matt Carlsona6b68da2010-12-06 08:28:52 +00004013 val = 0;
Matt Carlson42b64a42011-05-19 12:12:49 +00004014 /* Advertise 100-BaseTX EEE ability */
4015 if (advertise & ADVERTISED_100baseT_Full)
4016 val |= MDIO_AN_EEE_ADV_100TX;
4017 /* Advertise 1000-BaseT EEE ability */
4018 if (advertise & ADVERTISED_1000baseT_Full)
4019 val |= MDIO_AN_EEE_ADV_1000T;
4020 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
Matt Carlsonb715ce92011-07-20 10:20:52 +00004021 if (err)
4022 val = 0;
4023
4024 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4025 case ASIC_REV_5717:
4026 case ASIC_REV_57765:
Matt Carlson55086ad2011-12-14 11:09:59 +00004027 case ASIC_REV_57766:
Matt Carlsonb715ce92011-07-20 10:20:52 +00004028 case ASIC_REV_5719:
4029 /* If we advertised any eee advertisements above... */
4030 if (val)
4031 val = MII_TG3_DSP_TAP26_ALNOKO |
4032 MII_TG3_DSP_TAP26_RMRXSTO |
4033 MII_TG3_DSP_TAP26_OPCSINPT;
4034 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4035 /* Fall through */
4036 case ASIC_REV_5720:
4037 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4038 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4039 MII_TG3_DSP_CH34TP2_HIBW01);
4040 }
Matt Carlson52b02d02010-10-14 10:37:41 +00004041
Matt Carlson42b64a42011-05-19 12:12:49 +00004042 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4043 if (!err)
4044 err = err2;
4045 }
4046
4047done:
4048 return err;
4049}
4050
/* Program the copper PHY for the link mode requested in tp->link_config.
 *
 * Autoneg (or low-power/WoL) path: build an advertisement mask, push it
 * into the PHY via tg3_phy_autoneg_cfg(), then restart autonegotiation.
 * Forced-mode path: compute a BMCR value for the requested speed/duplex
 * and, if it differs from what the PHY currently holds, drop the link
 * (loopback) and wait for it to go down before writing the new BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			/* Low-power (WoL) mode: advertise only 10Mb, plus
			 * 100Mb when the NVRAM says WoL may run at 100.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			/* 10/100-only PHYs must not advertise gigabit. */
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			/* SPEED_10 needs no speed bits in BMCR. */
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback, then poll up to
			 * ~15ms for BMSR to report link loss before applying
			 * the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is latched; read twice for current state. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4123
/* One-time DSP setup for the BCM5401 PHY.  Disables tap power management
 * and sets the extended-packet-length bit, then loads a set of vendor
 * DSP tap coefficients.  Returns 0 on success; non-zero if any of the
 * PHY writes failed (error codes are OR-ed together).
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	/* Vendor magic DSP coefficients; the exact meaning is undocumented —
	 * do not reorder these writes.
	 */
	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
4142
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004143static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144{
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004145 u32 advmsk, tgtadv, advertising;
Michael Chan3600d912006-12-07 00:21:48 -08004146
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004147 advertising = tp->link_config.advertising;
4148 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004150 advmsk = ADVERTISE_ALL;
4151 if (tp->link_config.active_duplex == DUPLEX_FULL) {
Matt Carlsonf88788f2011-12-14 11:10:00 +00004152 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004153 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4154 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004156 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4157 return false;
4158
4159 if ((*lcladv & advmsk) != tgtadv)
4160 return false;
Matt Carlsonb99d2a52011-08-31 11:44:47 +00004161
Matt Carlsonf07e9af2010-08-02 11:26:07 +00004162 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 u32 tg3_ctrl;
4164
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004165 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
Michael Chan3600d912006-12-07 00:21:48 -08004166
Matt Carlson221c5632011-06-13 13:39:01 +00004167 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004168 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169
Matt Carlson3198e072012-02-13 15:20:10 +00004170 if (tgtadv &&
4171 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4172 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4173 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4174 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4175 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4176 } else {
4177 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4178 }
4179
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004180 if (tg3_ctrl != tgtadv)
4181 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182 }
Matt Carlson93a700a2011-08-31 11:44:54 +00004183
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004184 return true;
Matt Carlsonef167e22007-12-20 20:10:01 -08004185}
4186
Matt Carlson859edb22011-12-08 14:40:16 +00004187static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4188{
4189 u32 lpeth = 0;
4190
4191 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4192 u32 val;
4193
4194 if (tg3_readphy(tp, MII_STAT1000, &val))
4195 return false;
4196
4197 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4198 }
4199
4200 if (tg3_readphy(tp, MII_LPA, rmtadv))
4201 return false;
4202
4203 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4204 tp->link_config.rmt_adv = lpeth;
4205
4206 return true;
4207}
4208
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +00004209static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4210{
4211 if (curr_link_up != tp->link_up) {
4212 if (curr_link_up) {
4213 tg3_carrier_on(tp);
4214 } else {
4215 tg3_carrier_off(tp);
4216 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4217 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4218 }
4219
4220 tg3_link_report(tp);
4221 return true;
4222 }
4223
4224 return false;
4225}
4226
/* Bring up (or re-verify) the link on a copper PHY.
 *
 * Clears stale MAC status, optionally resets the PHY (explicitly via
 * @force_reset, or implicitly for chip revs with known link-down bugs),
 * probes the current speed/duplex from the PHY, and programs the MAC
 * mode registers to match.  If no usable link is found, autoneg/forced
 * mode is (re)started via tg3_phy_copper_begin().  Finally reports any
 * link-state change.  Returns 0, or a negative error from the BCM5401
 * DSP init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask MAC events and clear latched link-state status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Suspend MI auto-polling while we do manual MDIO accesses. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    tp->link_up) {
		/* BMSR is latched; double-read to get current link state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* No link: reload the 5401 DSP and poll up to ~10ms
			 * for the link to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 needs a full reset + DSP reload if it was
			 * last running at gigabit and still has no link.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts (read-to-clear, twice). */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	/* Assume link down until proven otherwise. */
	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		/* Ensure bit 10 of the MISCTEST shadow register is set;
		 * if we had to set it, skip straight to re-linking.
		 */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll BMSR (latched, so double-read) up to ~4ms for link. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for the aux status register to become non-zero,
		 * then decode speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for BMCR to read back as something sane (neither
		 * 0 nor all-ones).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Link is only "up" if the PHY's advertisement still
			 * matches our config and the partner data is readable.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record MDI-X status; the register/bit differ for
			 * FET-style PHYs.
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		/* One more latched-BMSR double-read; internal loopback also
		 * counts as link up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the negotiated speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X: notify firmware of the link change. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		/* CLKREQ must be off at 10/100 on affected chips. */
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
4505
/* Software state for the fiber autonegotiation state machine driven by
 * tg3_fiber_aneg_smachine().  The MR_* flags mirror the management
 * register bits of the 802.3 clause 37 arbitration process; the
 * ANEG_CFG_* bits decode the config words exchanged through the MAC's
 * TX/RX auto-neg registers.
 */
struct tg3_fiber_aneginfo {
	int state;	/* one of the ANEG_STATE_* values below */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;	/* MR_* status/control bits */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040	/* link partner advertised bits */
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Timestamps in state-machine ticks (see cur_time increment). */
	unsigned long link_time, cur_time;

	/* Last RX config word seen and how many times it repeated. */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* config words sent/received on the wire */
#define ANEG_CFG_NP		0x00000080	/* next page */
#define ANEG_CFG_ACK		0x00000040	/* acknowledge */
#define ANEG_CFG_RF2		0x00000020	/* remote fault 2 */
#define ANEG_CFG_RF1		0x00000010	/* remote fault 1 */
#define ANEG_CFG_PS2		0x00000001	/* pause 2 (asym pause) */
#define ANEG_CFG_PS1		0x00008000	/* pause 1 (sym pause) */
#define ANEG_CFG_HD		0x00004000	/* half duplex */
#define ANEG_CFG_FD		0x00002000	/* full duplex */
#define ANEG_CFG_INVAL		0x00001f06	/* bits that must never be set */

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must persist before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
4569
/* Run one tick of the software fiber autonegotiation state machine
 * (802.3 clause 37 style arbitration implemented in software on top of
 * the MAC's TX/RX auto-neg config registers).
 *
 * Called repeatedly (every ~1us from fiber_autoneg()); each call samples
 * the received config word, updates the match detectors, then advances
 * @ap->state.  Returns ANEG_OK to keep ticking, ANEG_TIMER_ENAB when a
 * settle timer is running, ANEG_DONE on completion, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick: clear all soft state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and update the ability/ack/idle
	 * match detectors.  "ability match" means the same non-idle config
	 * word was seen on consecutive samples.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: line is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Start sending an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Let the restart settle before detecting abilities. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-idle config word from the partner. */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's abilities back with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner acked; make sure the word it acked is the
			 * one we matched on (ignoring the ACK bit itself).
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner dropped back to idle: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject config words with reserved bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the link partner's advertised abilities into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)	/* received toggle bit */
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented, so
				 * only proceed if neither side wants one.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for the line to idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
4821
Matt Carlson5be73b42007-12-20 20:09:29 -08004822static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004823{
4824 int res = 0;
4825 struct tg3_fiber_aneginfo aninfo;
4826 int status = ANEG_FAILED;
4827 unsigned int tick;
4828 u32 tmp;
4829
4830 tw32_f(MAC_TX_AUTO_NEG, 0);
4831
4832 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4833 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4834 udelay(40);
4835
4836 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4837 udelay(40);
4838
4839 memset(&aninfo, 0, sizeof(aninfo));
4840 aninfo.flags |= MR_AN_ENABLE;
4841 aninfo.state = ANEG_STATE_UNKNOWN;
4842 aninfo.cur_time = 0;
4843 tick = 0;
4844 while (++tick < 195000) {
4845 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4846 if (status == ANEG_DONE || status == ANEG_FAILED)
4847 break;
4848
4849 udelay(1);
4850 }
4851
4852 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4853 tw32_f(MAC_MODE, tp->mac_mode);
4854 udelay(40);
4855
Matt Carlson5be73b42007-12-20 20:09:29 -08004856 *txflags = aninfo.txconfig;
4857 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004858
4859 if (status == ANEG_DONE &&
4860 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4861 MR_LP_ADV_FULL_DUPLEX)))
4862 res = 1;
4863
4864 return res;
4865}
4866
/* Initialize the BCM8002 fiber PHY via its vendor register sequence.
 * The register numbers and values below are undocumented Broadcom
 * magic; do not reorder.  Skipped when the driver is already fully
 * initialized and PCS sync has not been lost.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4916
/* Drive link setup for fiber ports that use the SG_DIG hardware
 * autoneg engine.
 *
 * @tp:		device instance
 * @mac_status:	snapshot of MAC_STATUS taken by the caller
 *
 * Returns 1 if the link is considered up, 0 otherwise (used as a
 * boolean, despite the int return type).  Also updates flow control,
 * tp->serdes_counter and the PARALLEL_DETECT phy flag as side effects.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All chips except 5704 A0/A1 need the MAC_SERDES_CFG workaround. */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	/* Forced mode: tear down HW autoneg if it was on, then report
	 * link purely from PCS sync.
	 */
	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* Port-dependent serdes config magic. */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold our pause advertisement into the expected control value. */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If we're in parallel-detect grace period and still have
		 * PCS sync without config words, keep the link up and let
		 * the counter run down before restarting autoneg.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while programming the new control value. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Reconstruct advertised/received pause bits in
			 * MII 1000BASE-X advertisement format.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			/* Autoneg not complete: count down, then fall back
			 * to parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: re-arm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5061
/* Fiber link setup for parts without the SG_DIG hardware autoneg
 * engine: run the software autoneg state machine (fiber_autoneg) or
 * force 1000FD.
 *
 * Returns 1 if link is up, 0 otherwise (boolean in an int).
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is nothing to negotiate. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the ANEG config bits into MII
			 * 1000BASE-X advertisement format for the
			 * flow-control resolver.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-change events until they stop
		 * re-asserting (bounded to 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but carrier is present and no config
		 * words are coming in: treat link as up (parallel detect).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5126
/* Top-level link setup for TBI fiber ports.  Chooses between the
 * hardware autoneg engine and the by-hand path, then reconciles MAC
 * state, LEDs and reported link parameters.
 *
 * @force_reset is part of the setup-phy interface but is unused on
 * this path (fiber has no copper-style PHY reset here).
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember previous settings so we only print a link report
	 * when something actually changed.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, link already up and stable —
	 * just ack the change bits and keep the current state.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI (fiber) port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the shared status block. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config/link-state change events until quiescent
	 * (bounded to 100 tries).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Nudge the partner by sending config words once the
		 * autoneg timeout has expired.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber is always 1000FD when up. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report only when link state or parameters changed. */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5229
/* Link setup for fiber ports whose SerDes is driven through an
 * MII-style register interface (5714/5780-class).  Handles three
 * cases: parallel-detect grace period (no-op), autoneg restart when
 * the advertisement changed, and forced speed/duplex.
 *
 * Returns the OR of all tg3_readphy() error codes (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending status-change events before we start. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* Read BMSR twice: link-status bit is latched-low per MII. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: BMSR link bit is unreliable; trust the MAC's
		 * TX status instead.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured flow control and link modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg was disabled; returns early in that case.
		 */
		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg, set requested duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				/* Withdraw the advertisement and restart
				 * autoneg so the partner drops the link
				 * before we force the new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low link bit: read BMSR twice again. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the common subset of our
			 * and the partner's advertisement.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
5393
/* Periodic (timer-context) helper for MII-style SerDes ports: falls
 * back to parallel detection when autoneg times out, and re-enables
 * autoneg when the partner starts sending config code words again.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; presumably the first read clears a
			 * latched value — TODO confirm against PHY docs.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5453
/* Central link setup entry point: dispatch to the fiber, fiber-MII or
 * copper path, then reprogram speed-dependent MAC settings (clock
 * prescaler on 5784_AX, TX slot time, stats coalescing, ASPM L1
 * threshold).
 *
 * Returns the error code from the chosen per-media setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick the GRC prescaler to match the current MAC
		 * clock frequency.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		/* 5720: keep the jumbo-frame and countdown fields. */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit needs a longer slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only collect stats while link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Relax the PCIe L1 entry threshold while link is down. */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5518
/* Nonzero while the driver has quiesced interrupts (e.g. around a
 * chip reset); pollers check this to avoid racing that path.
 * NOTE(review): the setters of tp->irq_sync are outside this chunk.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5523
Matt Carlson97bd8e42011-04-13 11:05:04 +00005524static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5525{
5526 int i;
5527
5528 dst = (u32 *)((u8 *)dst + off);
5529 for (i = 0; i < len; i += sizeof(u32))
5530 *dst++ = tr32(off + i);
5531}
5532
/* Fill @regs with a snapshot of the legacy (non-PCIe) register blocks.
 * Each tg3_rd32_loop() call places its words at the registers' native
 * byte offsets, so the buffer mirrors register space; some ranges are
 * skipped depending on chip capabilities.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers only exist with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* 5705+ chips have no separate TX CPU. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5582
5583static void tg3_dump_state(struct tg3 *tp)
5584{
5585 int i;
5586 u32 *regs;
5587
5588 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5589 if (!regs) {
5590 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5591 return;
5592 }
5593
Joe Perches63c3a662011-04-26 08:12:10 +00005594 if (tg3_flag(tp, PCI_EXPRESS)) {
Matt Carlson97bd8e42011-04-13 11:05:04 +00005595 /* Read up to but not including private PCI registers */
5596 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5597 regs[i / sizeof(u32)] = tr32(i);
5598 } else
5599 tg3_dump_legacy_regs(tp, regs);
5600
5601 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5602 if (!regs[i + 0] && !regs[i + 1] &&
5603 !regs[i + 2] && !regs[i + 3])
5604 continue;
5605
5606 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5607 i * 4,
5608 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5609 }
5610
5611 kfree(regs);
5612
5613 for (i = 0; i < tp->irq_cnt; i++) {
5614 struct tg3_napi *tnapi = &tp->napi[i];
5615
5616 /* SW status block */
5617 netdev_err(tp->dev,
5618 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5619 i,
5620 tnapi->hw_status->status,
5621 tnapi->hw_status->status_tag,
5622 tnapi->hw_status->rx_jumbo_consumer,
5623 tnapi->hw_status->rx_consumer,
5624 tnapi->hw_status->rx_mini_consumer,
5625 tnapi->hw_status->idx[0].rx_producer,
5626 tnapi->hw_status->idx[0].tx_consumer);
5627
5628 netdev_err(tp->dev,
5629 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5630 i,
5631 tnapi->last_tag, tnapi->last_irq_tag,
5632 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5633 tnapi->rx_rcb_ptr,
5634 tnapi->prodring.rx_std_prod_idx,
5635 tnapi->prodring.rx_std_cons_idx,
5636 tnapi->prodring.rx_jmb_prod_idx,
5637 tnapi->prodring.rx_jmb_cons_idx);
5638 }
5639}
5640
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Should only ever fire when the reorder workaround is not
	 * already active and mailbox writes are still direct.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under the lock; the actual chip
	 * reset happens later in the workqueue.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5662
Matt Carlsonf3f3f272009-08-28 14:03:21 +00005663static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
Michael Chan1b2a7202006-08-07 21:46:02 -07005664{
Matt Carlsonf65aac12010-08-02 11:26:03 +00005665 /* Tell compiler to fetch tx indices from memory. */
5666 barrier();
Matt Carlsonf3f3f272009-08-28 14:03:21 +00005667 return tnapi->tx_pending -
5668 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
Michael Chan1b2a7202006-08-07 21:46:02 -07005669}
5670
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaim TX descriptors that the hardware has consumed: walk the ring
 * from the software consumer index up to the hardware's reported
 * consumer index, unmap each buffer's DMA mapping, free the SKB, and
 * account completed packets/bytes to the BQL layer.  Wakes the netdev
 * TX queue if enough descriptors were freed.  On ring inconsistency
 * (NULL skb, or an occupied/over-run slot) it schedules recovery via
 * tg3_tx_recover() and returns without updating tx_cons.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* NOTE(review): with TSS the TX queue numbering appears shifted by
	 * one relative to the napi vector index — confirm against the
	 * interrupt/queue setup code.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A consumed slot with no SKB means the ring state is
		 * corrupt; bail out and let the recovery path reset.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear (head) portion of the SKB. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip any extra descriptors this mapping was split
		 * across (marked via ri->fragmented).
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment of the SKB. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Frag slots must be SKB-less and must not run
			 * past the hardware consumer index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completed work to the byte queue limits layer. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check the stopped state under the TX lock to close the race
	 * with a concurrent tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5764
Eric Dumazet8d4057a2012-04-27 00:34:49 +00005765static void tg3_frag_free(bool is_frag, void *data)
5766{
5767 if (is_frag)
5768 put_page(virt_to_head_page(data));
5769 else
5770 kfree(data);
5771}
5772
Eric Dumazet9205fd92011-11-18 06:47:01 +00005773static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
Matt Carlson2b2cdb62009-11-13 13:03:48 +00005774{
Eric Dumazet8d4057a2012-04-27 00:34:49 +00005775 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5776 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5777
Eric Dumazet9205fd92011-11-18 06:47:01 +00005778 if (!ri->data)
Matt Carlson2b2cdb62009-11-13 13:03:48 +00005779 return;
5780
FUJITA Tomonori4e5e4f02010-04-12 14:32:09 +00005781 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
Matt Carlson2b2cdb62009-11-13 13:03:48 +00005782 map_sz, PCI_DMA_FROMDEVICE);
Eric Dumazeta1e8b3072012-05-18 21:33:39 +00005783 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
Eric Dumazet9205fd92011-11-18 06:47:01 +00005784 ri->data = NULL;
Matt Carlson2b2cdb62009-11-13 13:03:48 +00005785}
5786
Eric Dumazet8d4057a2012-04-27 00:34:49 +00005787
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 *
 * Allocates a fresh RX data buffer (page fragment if it fits in a
 * page, kmalloc otherwise), DMA-maps it, and installs its address in
 * the descriptor selected by @opaque_key/@dest_idx_unmasked.  On
 * success *@frag_size is the fragment size (0 for kmalloc'ed buffers,
 * so callers can pass the right flag back to tg3_frag_free()).
 * Errors: -EINVAL bad opaque key, -ENOMEM allocation failed,
 * -EIO DMA mapping failed.
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Pick the descriptor, tracking slot, and mapping size for the
	 * standard or jumbo producer ring.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	/* Map only the packet area, skipping the alignment/headroom
	 * offset at the front of the buffer.
	 */
	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
5863
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Recycle the buffer at @src_idx of napi[0]'s producer ring into slot
 * @dest_idx_unmasked of @dpr, transferring both the data pointer and
 * the DMA address so the chip can reuse the buffer without a fresh
 * allocation.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source is always the shared ring owned by napi[0]. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	/* Clearing the source slot marks it free; pairs with the
	 * smp_rmb() in tg3_rx_prodring_xfer().
	 */
	src_map->data = NULL;
}
5913
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* NAPI RX handler: process up to @budget packets from @tnapi's status
 * ring.  Large packets (> copy threshold) are handed up zero-copy via
 * build_skb() after posting a replacement buffer; small packets are
 * copied into a fresh skb and the original buffer is recycled.  Ends
 * by acking the status ring and refilling the producer ring(s) —
 * directly for non-RSS, or via ring transfer to napi[1] for RSS.
 * Returns the number of packets delivered to the stack.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie identifies which producer ring and
		 * slot this completion refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop frames the chip flagged in error (except the
		 * benign odd-nibble MII indication).
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Hardware length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Zero-copy path: post a replacement buffer
			 * first; only then may we give this one away.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Copy path: small packet, keep the buffer and
			 * recycle it back to the producer ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		/* Accept the hardware checksum only if RXCSUM is on and
		 * the chip reports a valid full TCP/UDP checksum.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish the std producer index mid-loop
		 * so the chip never starves for buffers.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] performs the actual refill transfer;
		 * kick it if we are not already running there.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6130
/* Check the status block for a link-change event and, when one is
 * pending, clear it and kick the PHY machinery under tp->lock.
 * Skipped entirely when link changes are detected by register polling
 * (USE_LINKCHG_REG) or serdes polling (POLL_SERDES) instead.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the event while preserving the other
			 * status bits.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib handles the link itself; just
				 * clear the MAC status latches.
				 */
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6154
/* Transfer freshly posted RX buffers from a source producer ring set
 * @spr to the destination set @dpr, for both the standard and jumbo
 * rings.  Buffer bookkeeping (ring_info) and descriptor addresses are
 * copied in bulk, bounded by the free space available at the
 * destination.  Returns 0 on success, or -ENOSPC if a destination
 * slot was still occupied (the transfer stops at that point).
 * Lock-free: relies on smp_rmb()/smp_wmb() pairing with the producers
 * and with tg3_recycle_rx().
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* --- Standard ring --- */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Contiguous run available at the source, handling ring
		 * wrap-around.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Clamp to the contiguous space at the destination. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding a
		 * buffer; report -ENOSPC for the shortfall.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* --- Jumbo ring (same algorithm) --- */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6280
/* Core per-vector NAPI work: reap TX completions, then process RX up
 * to the remaining @budget.  On the RSS refill vector (napi[1]) it
 * additionally transfers buffers from napi[0]'s producer ring to each
 * per-queue ring and publishes the new producer indices to the chip.
 * Returns the cumulative work done; bails out early if TX recovery is
 * pending or this vector has no RX return ring.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* tg3_tx() sets TX_RECOVERY_PENDING on ring corruption;
		 * skip RX and let the reset path take over.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* This vector has no RX return ring. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		/* Pull posted buffers from every RX queue's ring into
		 * the shared ring owned by napi[0].
		 */
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		/* Only touch the mailboxes whose producer index moved. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer ran out of space; nudge the coalescing
		 * engine so we get called again soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
David S. Millerf7383c22005-05-18 22:50:53 -07006331
Matt Carlsondb219972011-11-04 09:15:03 +00006332static inline void tg3_reset_task_schedule(struct tg3 *tp)
6333{
6334 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6335 schedule_work(&tp->reset_task);
6336}
6337
/* Cancel a pending reset task and wait for a running one to finish,
 * then clear the bookkeeping flags.  The flags are cleared only after
 * cancel_work_sync() returns, so no reset task can still be running
 * when they are reset.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6344
Matt Carlson35f2d7d2009-11-13 13:03:41 +00006345static int tg3_poll_msix(struct napi_struct *napi, int budget)
6346{
6347 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6348 struct tg3 *tp = tnapi->tp;
6349 int work_done = 0;
6350 struct tg3_hw_status *sblk = tnapi->hw_status;
6351
6352 while (1) {
6353 work_done = tg3_poll_work(tnapi, work_done, budget);
6354
Joe Perches63c3a662011-04-26 08:12:10 +00006355 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
Matt Carlson35f2d7d2009-11-13 13:03:41 +00006356 goto tx_recovery;
6357
6358 if (unlikely(work_done >= budget))
6359 break;
6360
Matt Carlsonc6cdf432010-04-05 10:19:26 +00006361 /* tp->last_tag is used in tg3_int_reenable() below
Matt Carlson35f2d7d2009-11-13 13:03:41 +00006362 * to tell the hw how much work has been processed,
6363 * so we must read it before checking for more work.
6364 */
6365 tnapi->last_tag = sblk->status_tag;
6366 tnapi->last_irq_tag = tnapi->last_tag;
6367 rmb();
6368
6369 /* check for RX/TX work to do */
Matt Carlson6d40db72010-04-05 10:19:20 +00006370 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6371 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
Michael Chan7ae52892012-03-21 15:38:33 +00006372
6373 /* This test here is not race free, but will reduce
6374 * the number of interrupts by looping again.
6375 */
6376 if (tnapi == &tp->napi[1] && tp->rx_refill)
6377 continue;
6378
Matt Carlson35f2d7d2009-11-13 13:03:41 +00006379 napi_complete(napi);
6380 /* Reenable interrupts. */
6381 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
Michael Chan7ae52892012-03-21 15:38:33 +00006382
6383 /* This test here is synchronized by napi_schedule()
6384 * and napi_complete() to close the race condition.
6385 */
6386 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6387 tw32(HOSTCC_MODE, tp->coalesce_mode |
6388 HOSTCC_MODE_ENABLE |
6389 tnapi->coal_now);
6390 }
Matt Carlson35f2d7d2009-11-13 13:03:41 +00006391 mmiowb();
6392 break;
6393 }
6394 }
6395
6396 return work_done;
6397
6398tx_recovery:
6399 /* work_done is guaranteed to be less than budget. */
6400 napi_complete(napi);
Matt Carlsondb219972011-11-04 09:15:03 +00006401 tg3_reset_task_schedule(tp);
Matt Carlson35f2d7d2009-11-13 13:03:41 +00006402 return work_done;
6403}
6404
Matt Carlsone64de4e2011-04-13 11:05:05 +00006405static void tg3_process_error(struct tg3 *tp)
6406{
6407 u32 val;
6408 bool real_error = false;
6409
Joe Perches63c3a662011-04-26 08:12:10 +00006410 if (tg3_flag(tp, ERROR_PROCESSED))
Matt Carlsone64de4e2011-04-13 11:05:05 +00006411 return;
6412
6413 /* Check Flow Attention register */
6414 val = tr32(HOSTCC_FLOW_ATTN);
6415 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6416 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6417 real_error = true;
6418 }
6419
6420 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6421 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6422 real_error = true;
6423 }
6424
6425 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6426 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6427 real_error = true;
6428 }
6429
6430 if (!real_error)
6431 return;
6432
6433 tg3_dump_state(tp);
6434
Joe Perches63c3a662011-04-26 08:12:10 +00006435 tg3_flag_set(tp, ERROR_PROCESSED);
Matt Carlsondb219972011-11-04 09:15:03 +00006436 tg3_reset_task_schedule(tp);
Matt Carlsone64de4e2011-04-13 11:05:05 +00006437}
6438
David S. Miller6f535762007-10-11 18:08:29 -07006439static int tg3_poll(struct napi_struct *napi, int budget)
6440{
Matt Carlson8ef04422009-08-28 14:01:37 +00006441 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6442 struct tg3 *tp = tnapi->tp;
David S. Miller6f535762007-10-11 18:08:29 -07006443 int work_done = 0;
Matt Carlson898a56f2009-08-28 14:02:40 +00006444 struct tg3_hw_status *sblk = tnapi->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07006445
6446 while (1) {
Matt Carlsone64de4e2011-04-13 11:05:05 +00006447 if (sblk->status & SD_STATUS_ERROR)
6448 tg3_process_error(tp);
6449
Matt Carlson35f2d7d2009-11-13 13:03:41 +00006450 tg3_poll_link(tp);
6451
Matt Carlson17375d22009-08-28 14:02:18 +00006452 work_done = tg3_poll_work(tnapi, work_done, budget);
David S. Miller6f535762007-10-11 18:08:29 -07006453
Joe Perches63c3a662011-04-26 08:12:10 +00006454 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
David S. Miller6f535762007-10-11 18:08:29 -07006455 goto tx_recovery;
6456
6457 if (unlikely(work_done >= budget))
6458 break;
6459
Joe Perches63c3a662011-04-26 08:12:10 +00006460 if (tg3_flag(tp, TAGGED_STATUS)) {
Matt Carlson17375d22009-08-28 14:02:18 +00006461 /* tp->last_tag is used in tg3_int_reenable() below
Michael Chan4fd7ab52007-10-12 01:39:50 -07006462 * to tell the hw how much work has been processed,
6463 * so we must read it before checking for more work.
6464 */
Matt Carlson898a56f2009-08-28 14:02:40 +00006465 tnapi->last_tag = sblk->status_tag;
6466 tnapi->last_irq_tag = tnapi->last_tag;
Michael Chan4fd7ab52007-10-12 01:39:50 -07006467 rmb();
6468 } else
6469 sblk->status &= ~SD_STATUS_UPDATED;
6470
Matt Carlson17375d22009-08-28 14:02:18 +00006471 if (likely(!tg3_has_work(tnapi))) {
Ben Hutchings288379f2009-01-19 16:43:59 -08006472 napi_complete(napi);
Matt Carlson17375d22009-08-28 14:02:18 +00006473 tg3_int_reenable(tnapi);
David S. Miller6f535762007-10-11 18:08:29 -07006474 break;
6475 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006476 }
6477
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006478 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07006479
6480tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07006481 /* work_done is guaranteed to be less than budget. */
Ben Hutchings288379f2009-01-19 16:43:59 -08006482 napi_complete(napi);
Matt Carlsondb219972011-11-04 09:15:03 +00006483 tg3_reset_task_schedule(tp);
Michael Chan4fd7ab52007-10-12 01:39:50 -07006484 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006485}
6486
Matt Carlson66cfd1b2010-09-30 10:34:30 +00006487static void tg3_napi_disable(struct tg3 *tp)
6488{
6489 int i;
6490
6491 for (i = tp->irq_cnt - 1; i >= 0; i--)
6492 napi_disable(&tp->napi[i].napi);
6493}
6494
6495static void tg3_napi_enable(struct tg3 *tp)
6496{
6497 int i;
6498
6499 for (i = 0; i < tp->irq_cnt; i++)
6500 napi_enable(&tp->napi[i].napi);
6501}
6502
6503static void tg3_napi_init(struct tg3 *tp)
6504{
6505 int i;
6506
6507 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6508 for (i = 1; i < tp->irq_cnt; i++)
6509 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6510}
6511
6512static void tg3_napi_fini(struct tg3 *tp)
6513{
6514 int i;
6515
6516 for (i = 0; i < tp->irq_cnt; i++)
6517 netif_napi_del(&tp->napi[i].napi);
6518}
6519
/* Quiesce the data path: refresh trans_start so the watchdog does not
 * fire while we are deliberately stopped, then stop NAPI polling,
 * drop the carrier and disable the tx queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
6527
/* Restart the data path stopped by tg3_netif_stop(): wake the tx
 * queues, restore the carrier if the link is up, re-enable NAPI and
 * interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Force a status-block pass so any events that arrived while
	 * stopped are processed once interrupts are back on.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6543
David S. Millerf47c11e2005-06-24 20:18:35 -07006544static void tg3_irq_quiesce(struct tg3 *tp)
6545{
Matt Carlson4f125f42009-09-01 12:55:02 +00006546 int i;
6547
David S. Millerf47c11e2005-06-24 20:18:35 -07006548 BUG_ON(tp->irq_sync);
6549
6550 tp->irq_sync = 1;
6551 smp_mb();
6552
Matt Carlson4f125f42009-09-01 12:55:02 +00006553 for (i = 0; i < tp->irq_cnt; i++)
6554 synchronize_irq(tp->napi[i].irq_vec);
David S. Millerf47c11e2005-06-24 20:18:35 -07006555}
6556
David S. Millerf47c11e2005-06-24 20:18:35 -07006557/* Fully shutdown all tg3 driver activity elsewhere in the system.
6558 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6559 * with as well. Most of the time, this is not necessary except when
6560 * shutting down the device.
6561 */
6562static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6563{
Michael Chan46966542007-07-11 19:47:19 -07006564 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07006565 if (irq_sync)
6566 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07006567}
6568
/* Release the lock taken by tg3_full_lock().  Note the irq_sync state
 * is not undone here; interrupt handlers are re-armed elsewhere.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6573
Michael Chanfcfa0a32006-03-20 22:28:41 -08006574/* One-shot MSI handler - Chip automatically disables interrupt
6575 * after sending MSI so driver doesn't have to do it.
6576 */
David Howells7d12e782006-10-05 14:55:46 +01006577static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08006578{
Matt Carlson09943a12009-08-28 14:01:57 +00006579 struct tg3_napi *tnapi = dev_id;
6580 struct tg3 *tp = tnapi->tp;
Michael Chanfcfa0a32006-03-20 22:28:41 -08006581
Matt Carlson898a56f2009-08-28 14:02:40 +00006582 prefetch(tnapi->hw_status);
Matt Carlson0c1d0e22009-09-01 13:16:33 +00006583 if (tnapi->rx_rcb)
6584 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
Michael Chanfcfa0a32006-03-20 22:28:41 -08006585
6586 if (likely(!tg3_irq_sync(tp)))
Matt Carlson09943a12009-08-28 14:01:57 +00006587 napi_schedule(&tnapi->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08006588
6589 return IRQ_HANDLED;
6590}
6591
Michael Chan88b06bc22005-04-21 17:13:25 -07006592/* MSI ISR - No need to check for interrupt sharing and no need to
6593 * flush status block and interrupt mailbox. PCI ordering rules
6594 * guarantee that MSI will arrive after the status block.
6595 */
David Howells7d12e782006-10-05 14:55:46 +01006596static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc22005-04-21 17:13:25 -07006597{
Matt Carlson09943a12009-08-28 14:01:57 +00006598 struct tg3_napi *tnapi = dev_id;
6599 struct tg3 *tp = tnapi->tp;
Michael Chan88b06bc22005-04-21 17:13:25 -07006600
Matt Carlson898a56f2009-08-28 14:02:40 +00006601 prefetch(tnapi->hw_status);
Matt Carlson0c1d0e22009-09-01 13:16:33 +00006602 if (tnapi->rx_rcb)
6603 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
Michael Chan88b06bc22005-04-21 17:13:25 -07006604 /*
David S. Millerfac9b832005-05-18 22:46:34 -07006605 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc22005-04-21 17:13:25 -07006606 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07006607 * Writing non-zero to intr-mbox-0 additional tells the
Michael Chan88b06bc22005-04-21 17:13:25 -07006608 * NIC to stop sending us irqs, engaging "in-intr-handler"
6609 * event coalescing.
6610 */
Matt Carlson5b39de92011-08-31 11:44:50 +00006611 tw32_mailbox(tnapi->int_mbox, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07006612 if (likely(!tg3_irq_sync(tp)))
Matt Carlson09943a12009-08-28 14:01:57 +00006613 napi_schedule(&tnapi->napi);
Michael Chan61487482005-09-05 17:53:19 -07006614
Michael Chan88b06bc22005-04-21 17:13:25 -07006615 return IRQ_RETVAL(1);
6616}
6617
David Howells7d12e782006-10-05 14:55:46 +01006618static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006619{
Matt Carlson09943a12009-08-28 14:01:57 +00006620 struct tg3_napi *tnapi = dev_id;
6621 struct tg3 *tp = tnapi->tp;
Matt Carlson898a56f2009-08-28 14:02:40 +00006622 struct tg3_hw_status *sblk = tnapi->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006623 unsigned int handled = 1;
6624
Linus Torvalds1da177e2005-04-16 15:20:36 -07006625 /* In INTx mode, it is possible for the interrupt to arrive at
6626 * the CPU before the status block posted prior to the interrupt.
6627 * Reading the PCI State register will confirm whether the
6628 * interrupt is ours and will flush the status block.
6629 */
Michael Chand18edcb2007-03-24 20:57:11 -07006630 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
Joe Perches63c3a662011-04-26 08:12:10 +00006631 if (tg3_flag(tp, CHIP_RESETTING) ||
Michael Chand18edcb2007-03-24 20:57:11 -07006632 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6633 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07006634 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07006635 }
Michael Chand18edcb2007-03-24 20:57:11 -07006636 }
6637
6638 /*
6639 * Writing any value to intr-mbox-0 clears PCI INTA# and
6640 * chip-internal interrupt pending events.
6641 * Writing non-zero to intr-mbox-0 additional tells the
6642 * NIC to stop sending us irqs, engaging "in-intr-handler"
6643 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07006644 *
6645 * Flush the mailbox to de-assert the IRQ immediately to prevent
6646 * spurious interrupts. The flush impacts performance but
6647 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07006648 */
Michael Chanc04cb342007-05-07 00:26:15 -07006649 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07006650 if (tg3_irq_sync(tp))
6651 goto out;
6652 sblk->status &= ~SD_STATUS_UPDATED;
Matt Carlson17375d22009-08-28 14:02:18 +00006653 if (likely(tg3_has_work(tnapi))) {
Matt Carlson72334482009-08-28 14:03:01 +00006654 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
Matt Carlson09943a12009-08-28 14:01:57 +00006655 napi_schedule(&tnapi->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07006656 } else {
6657 /* No work, shared interrupt perhaps? re-enable
6658 * interrupts, and flush that PCI write
6659 */
6660 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6661 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07006662 }
David S. Millerf47c11e2005-06-24 20:18:35 -07006663out:
David S. Millerfac9b832005-05-18 22:46:34 -07006664 return IRQ_RETVAL(handled);
6665}
6666
David Howells7d12e782006-10-05 14:55:46 +01006667static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07006668{
Matt Carlson09943a12009-08-28 14:01:57 +00006669 struct tg3_napi *tnapi = dev_id;
6670 struct tg3 *tp = tnapi->tp;
Matt Carlson898a56f2009-08-28 14:02:40 +00006671 struct tg3_hw_status *sblk = tnapi->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07006672 unsigned int handled = 1;
6673
David S. Millerfac9b832005-05-18 22:46:34 -07006674 /* In INTx mode, it is possible for the interrupt to arrive at
6675 * the CPU before the status block posted prior to the interrupt.
6676 * Reading the PCI State register will confirm whether the
6677 * interrupt is ours and will flush the status block.
6678 */
Matt Carlson898a56f2009-08-28 14:02:40 +00006679 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
Joe Perches63c3a662011-04-26 08:12:10 +00006680 if (tg3_flag(tp, CHIP_RESETTING) ||
Michael Chand18edcb2007-03-24 20:57:11 -07006681 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6682 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07006683 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006684 }
Michael Chand18edcb2007-03-24 20:57:11 -07006685 }
6686
6687 /*
6688 * writing any value to intr-mbox-0 clears PCI INTA# and
6689 * chip-internal interrupt pending events.
6690 * writing non-zero to intr-mbox-0 additional tells the
6691 * NIC to stop sending us irqs, engaging "in-intr-handler"
6692 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07006693 *
6694 * Flush the mailbox to de-assert the IRQ immediately to prevent
6695 * spurious interrupts. The flush impacts performance but
6696 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07006697 */
Michael Chanc04cb342007-05-07 00:26:15 -07006698 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Matt Carlson624f8e52009-04-20 06:55:01 +00006699
6700 /*
6701 * In a shared interrupt configuration, sometimes other devices'
6702 * interrupts will scream. We record the current status tag here
6703 * so that the above check can report that the screaming interrupts
6704 * are unhandled. Eventually they will be silenced.
6705 */
Matt Carlson898a56f2009-08-28 14:02:40 +00006706 tnapi->last_irq_tag = sblk->status_tag;
Matt Carlson624f8e52009-04-20 06:55:01 +00006707
Michael Chand18edcb2007-03-24 20:57:11 -07006708 if (tg3_irq_sync(tp))
6709 goto out;
Matt Carlson624f8e52009-04-20 06:55:01 +00006710
Matt Carlson72334482009-08-28 14:03:01 +00006711 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
Matt Carlson624f8e52009-04-20 06:55:01 +00006712
Matt Carlson09943a12009-08-28 14:01:57 +00006713 napi_schedule(&tnapi->napi);
Matt Carlson624f8e52009-04-20 06:55:01 +00006714
David S. Millerf47c11e2005-06-24 20:18:35 -07006715out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006716 return IRQ_RETVAL(handled);
6717}
6718
Michael Chan79381092005-04-21 17:13:59 -07006719/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01006720static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07006721{
Matt Carlson09943a12009-08-28 14:01:57 +00006722 struct tg3_napi *tnapi = dev_id;
6723 struct tg3 *tp = tnapi->tp;
Matt Carlson898a56f2009-08-28 14:02:40 +00006724 struct tg3_hw_status *sblk = tnapi->hw_status;
Michael Chan79381092005-04-21 17:13:59 -07006725
Michael Chanf9804dd2005-09-27 12:13:10 -07006726 if ((sblk->status & SD_STATUS_UPDATED) ||
6727 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07006728 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07006729 return IRQ_RETVAL(1);
6730 }
6731 return IRQ_RETVAL(0);
6732}
6733
Linus Torvalds1da177e2005-04-16 15:20:36 -07006734#ifdef CONFIG_NET_POLL_CONTROLLER
6735static void tg3_poll_controller(struct net_device *dev)
6736{
Matt Carlson4f125f42009-09-01 12:55:02 +00006737 int i;
Michael Chan88b06bc22005-04-21 17:13:25 -07006738 struct tg3 *tp = netdev_priv(dev);
6739
Matt Carlson4f125f42009-09-01 12:55:02 +00006740 for (i = 0; i < tp->irq_cnt; i++)
Louis Rillingfe234f02010-03-09 06:14:41 +00006741 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006742}
6743#endif
6744
Linus Torvalds1da177e2005-04-16 15:20:36 -07006745static void tg3_tx_timeout(struct net_device *dev)
6746{
6747 struct tg3 *tp = netdev_priv(dev);
6748
Michael Chanb0408752007-02-13 12:18:30 -08006749 if (netif_msg_tx_err(tp)) {
Joe Perches05dbe002010-02-17 19:44:19 +00006750 netdev_err(dev, "transmit timed out, resetting\n");
Matt Carlson97bd8e42011-04-13 11:05:04 +00006751 tg3_dump_state(tp);
Michael Chanb0408752007-02-13 12:18:30 -08006752 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006753
Matt Carlsondb219972011-11-04 09:15:03 +00006754 tg3_reset_task_schedule(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006755}
6756
Michael Chanc58ec932005-09-17 00:46:27 -07006757/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6758static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6759{
6760 u32 base = (u32) mapping & 0xffffffff;
6761
Eric Dumazet807540b2010-09-23 05:40:09 +00006762 return (base > 0xffffdcc0) && (base + len + 8 < base);
Michael Chanc58ec932005-09-17 00:46:27 -07006763}
6764
Michael Chan72f2afb2006-03-06 19:28:35 -08006765/* Test for DMA addresses > 40-bit */
6766static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6767 int len)
6768{
6769#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Joe Perches63c3a662011-04-26 08:12:10 +00006770 if (tg3_flag(tp, 40BIT_DMA_BUG))
Eric Dumazet807540b2010-09-23 05:40:09 +00006771 return ((u64) mapping + len) > DMA_BIT_MASK(40);
Michael Chan72f2afb2006-03-06 19:28:35 -08006772 return 0;
6773#else
6774 return 0;
6775#endif
6776}
6777
Matt Carlsond1a3b732011-07-27 14:20:51 +00006778static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
Matt Carlson92cd3a12011-07-27 14:20:47 +00006779 dma_addr_t mapping, u32 len, u32 flags,
6780 u32 mss, u32 vlan)
Matt Carlson2ffcc982011-05-19 12:12:44 +00006781{
Matt Carlson92cd3a12011-07-27 14:20:47 +00006782 txbd->addr_hi = ((u64) mapping >> 32);
6783 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6784 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6785 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
Matt Carlson2ffcc982011-05-19 12:12:44 +00006786}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006787
Matt Carlson84b67b22011-07-27 14:20:52 +00006788static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
Matt Carlsond1a3b732011-07-27 14:20:51 +00006789 dma_addr_t map, u32 len, u32 flags,
6790 u32 mss, u32 vlan)
6791{
6792 struct tg3 *tp = tnapi->tp;
6793 bool hwbug = false;
6794
6795 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
Rusty Russell3db1cd52011-12-19 13:56:45 +00006796 hwbug = true;
Matt Carlsond1a3b732011-07-27 14:20:51 +00006797
6798 if (tg3_4g_overflow_test(map, len))
Rusty Russell3db1cd52011-12-19 13:56:45 +00006799 hwbug = true;
Matt Carlsond1a3b732011-07-27 14:20:51 +00006800
6801 if (tg3_40bit_overflow_test(tp, map, len))
Rusty Russell3db1cd52011-12-19 13:56:45 +00006802 hwbug = true;
Matt Carlsond1a3b732011-07-27 14:20:51 +00006803
Matt Carlsona4cb4282011-12-14 11:09:58 +00006804 if (tp->dma_limit) {
Matt Carlsonb9e45482011-11-04 09:14:59 +00006805 u32 prvidx = *entry;
Matt Carlsone31aa982011-07-27 14:20:53 +00006806 u32 tmp_flag = flags & ~TXD_FLAG_END;
Matt Carlsona4cb4282011-12-14 11:09:58 +00006807 while (len > tp->dma_limit && *budget) {
6808 u32 frag_len = tp->dma_limit;
6809 len -= tp->dma_limit;
Matt Carlsone31aa982011-07-27 14:20:53 +00006810
Matt Carlsonb9e45482011-11-04 09:14:59 +00006811 /* Avoid the 8byte DMA problem */
6812 if (len <= 8) {
Matt Carlsona4cb4282011-12-14 11:09:58 +00006813 len += tp->dma_limit / 2;
6814 frag_len = tp->dma_limit / 2;
Matt Carlsone31aa982011-07-27 14:20:53 +00006815 }
6816
Matt Carlsonb9e45482011-11-04 09:14:59 +00006817 tnapi->tx_buffers[*entry].fragmented = true;
6818
6819 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6820 frag_len, tmp_flag, mss, vlan);
6821 *budget -= 1;
6822 prvidx = *entry;
6823 *entry = NEXT_TX(*entry);
6824
Matt Carlsone31aa982011-07-27 14:20:53 +00006825 map += frag_len;
6826 }
6827
6828 if (len) {
6829 if (*budget) {
6830 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6831 len, flags, mss, vlan);
Matt Carlsonb9e45482011-11-04 09:14:59 +00006832 *budget -= 1;
Matt Carlsone31aa982011-07-27 14:20:53 +00006833 *entry = NEXT_TX(*entry);
6834 } else {
Rusty Russell3db1cd52011-12-19 13:56:45 +00006835 hwbug = true;
Matt Carlsonb9e45482011-11-04 09:14:59 +00006836 tnapi->tx_buffers[prvidx].fragmented = false;
Matt Carlsone31aa982011-07-27 14:20:53 +00006837 }
6838 }
6839 } else {
Matt Carlson84b67b22011-07-27 14:20:52 +00006840 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6841 len, flags, mss, vlan);
Matt Carlsone31aa982011-07-27 14:20:53 +00006842 *entry = NEXT_TX(*entry);
6843 }
Matt Carlsond1a3b732011-07-27 14:20:51 +00006844
6845 return hwbug;
6846}
6847
Matt Carlson0d681b22011-07-27 14:20:49 +00006848static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
Matt Carlson432aa7e2011-05-19 12:12:45 +00006849{
6850 int i;
Matt Carlson0d681b22011-07-27 14:20:49 +00006851 struct sk_buff *skb;
Matt Carlsondf8944c2011-07-27 14:20:46 +00006852 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
Matt Carlson432aa7e2011-05-19 12:12:45 +00006853
Matt Carlson0d681b22011-07-27 14:20:49 +00006854 skb = txb->skb;
6855 txb->skb = NULL;
6856
Matt Carlson432aa7e2011-05-19 12:12:45 +00006857 pci_unmap_single(tnapi->tp->pdev,
6858 dma_unmap_addr(txb, mapping),
6859 skb_headlen(skb),
6860 PCI_DMA_TODEVICE);
Matt Carlsone01ee142011-07-27 14:20:50 +00006861
6862 while (txb->fragmented) {
6863 txb->fragmented = false;
6864 entry = NEXT_TX(entry);
6865 txb = &tnapi->tx_buffers[entry];
6866 }
6867
Matt Carlsonba1142e2011-11-04 09:15:00 +00006868 for (i = 0; i <= last; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00006869 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Matt Carlson432aa7e2011-05-19 12:12:45 +00006870
6871 entry = NEXT_TX(entry);
6872 txb = &tnapi->tx_buffers[entry];
6873
6874 pci_unmap_page(tnapi->tp->pdev,
6875 dma_unmap_addr(txb, mapping),
Eric Dumazet9e903e02011-10-18 21:00:24 +00006876 skb_frag_size(frag), PCI_DMA_TODEVICE);
Matt Carlsone01ee142011-07-27 14:20:50 +00006877
6878 while (txb->fragmented) {
6879 txb->fragmented = false;
6880 entry = NEXT_TX(entry);
6881 txb = &tnapi->tx_buffers[entry];
6882 }
Matt Carlson432aa7e2011-05-19 12:12:45 +00006883 }
6884}
6885
Michael Chan72f2afb2006-03-06 19:28:35 -08006886/* Workaround 4GB and 40-bit hardware DMA bugs. */
Matt Carlson24f4efd2009-11-13 13:03:35 +00006887static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
David S. Miller1805b2f2011-10-24 18:18:09 -04006888 struct sk_buff **pskb,
Matt Carlson84b67b22011-07-27 14:20:52 +00006889 u32 *entry, u32 *budget,
Matt Carlson92cd3a12011-07-27 14:20:47 +00006890 u32 base_flags, u32 mss, u32 vlan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006891{
Matt Carlson24f4efd2009-11-13 13:03:35 +00006892 struct tg3 *tp = tnapi->tp;
David S. Miller1805b2f2011-10-24 18:18:09 -04006893 struct sk_buff *new_skb, *skb = *pskb;
Michael Chanc58ec932005-09-17 00:46:27 -07006894 dma_addr_t new_addr = 0;
Matt Carlson432aa7e2011-05-19 12:12:45 +00006895 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006896
Matt Carlson41588ba2008-04-19 18:12:33 -07006897 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6898 new_skb = skb_copy(skb, GFP_ATOMIC);
6899 else {
6900 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6901
6902 new_skb = skb_copy_expand(skb,
6903 skb_headroom(skb) + more_headroom,
6904 skb_tailroom(skb), GFP_ATOMIC);
6905 }
6906
Linus Torvalds1da177e2005-04-16 15:20:36 -07006907 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07006908 ret = -1;
6909 } else {
6910 /* New SKB is guaranteed to be linear. */
Alexander Duyckf4188d82009-12-02 16:48:38 +00006911 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6912 PCI_DMA_TODEVICE);
6913 /* Make sure the mapping succeeded */
6914 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
Alexander Duyckf4188d82009-12-02 16:48:38 +00006915 dev_kfree_skb(new_skb);
Michael Chanc58ec932005-09-17 00:46:27 -07006916 ret = -1;
Michael Chanc58ec932005-09-17 00:46:27 -07006917 } else {
Matt Carlsonb9e45482011-11-04 09:14:59 +00006918 u32 save_entry = *entry;
6919
Matt Carlson92cd3a12011-07-27 14:20:47 +00006920 base_flags |= TXD_FLAG_END;
6921
Matt Carlson84b67b22011-07-27 14:20:52 +00006922 tnapi->tx_buffers[*entry].skb = new_skb;
6923 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
Matt Carlson432aa7e2011-05-19 12:12:45 +00006924 mapping, new_addr);
6925
Matt Carlson84b67b22011-07-27 14:20:52 +00006926 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
Matt Carlsond1a3b732011-07-27 14:20:51 +00006927 new_skb->len, base_flags,
6928 mss, vlan)) {
Matt Carlsonba1142e2011-11-04 09:15:00 +00006929 tg3_tx_skb_unmap(tnapi, save_entry, -1);
Matt Carlsond1a3b732011-07-27 14:20:51 +00006930 dev_kfree_skb(new_skb);
6931 ret = -1;
6932 }
Michael Chanc58ec932005-09-17 00:46:27 -07006933 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006934 }
6935
Linus Torvalds1da177e2005-04-16 15:20:36 -07006936 dev_kfree_skb(skb);
David S. Miller1805b2f2011-10-24 18:18:09 -04006937 *pskb = new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07006938 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006939}
6940
Matt Carlson2ffcc982011-05-19 12:12:44 +00006941static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
Michael Chan52c0fd82006-06-29 20:15:54 -07006942
6943/* Use GSO to workaround a rare TSO bug that may be triggered when the
6944 * TSO header is greater than 80 bytes.
6945 */
6946static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6947{
6948 struct sk_buff *segs, *nskb;
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006949 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
Michael Chan52c0fd82006-06-29 20:15:54 -07006950
6951 /* Estimate the number of fragments in the worst case */
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006952 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
Michael Chan52c0fd82006-06-29 20:15:54 -07006953 netif_stop_queue(tp->dev);
Matt Carlsonf65aac12010-08-02 11:26:03 +00006954
6955 /* netif_tx_stop_queue() must be done before checking
6956 * checking tx index in tg3_tx_avail() below, because in
6957 * tg3_tx(), we update tx index before checking for
6958 * netif_tx_queue_stopped().
6959 */
6960 smp_mb();
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006961 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
Michael Chan7f62ad52007-02-20 23:25:40 -08006962 return NETDEV_TX_BUSY;
6963
6964 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07006965 }
6966
6967 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07006968 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07006969 goto tg3_tso_bug_end;
6970
6971 do {
6972 nskb = segs;
6973 segs = segs->next;
6974 nskb->next = NULL;
Matt Carlson2ffcc982011-05-19 12:12:44 +00006975 tg3_start_xmit(nskb, tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07006976 } while (segs);
6977
6978tg3_tso_bug_end:
6979 dev_kfree_skb(skb);
6980
6981 return NETDEV_TX_OK;
6982}
Michael Chan52c0fd82006-06-29 20:15:54 -07006983
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Runs in BH-disabled context (see comment below).  Maps the skb for
 * DMA, builds the TX BDs, and kicks the hardware producer mailbox.
 * Returns NETDEV_TX_OK (dropping on unrecoverable mapping errors) or
 * NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With multivector TSS, vector 0 carries no TX ring, so queue N
	 * maps to napi vector N+1.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		/* TSO path: prepare IP/TCP headers for hardware or
		 * firmware segmentation.
		 */
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* Headers are modified below; make sure they are writable. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Chips with the TSO bug cannot handle long headers;
		 * fall back to software GSO in tg3_tso_bug().
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* Hardware TSO computes the checksum itself. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the header length into mss/base_flags in the
		 * format each hardware TSO generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	/* Map the linear part of the skb first. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	/* Queue the head BD; a nonzero return means the buffer trips a
	 * hardware DMA erratum and must go through the workaround path.
	 */
	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Undo the BDs queued so far and re-submit the skb through
		 * the bounce-buffer workaround.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Unwind fragments 0..i-1; the head BD slot is cleared below. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7200
Matt Carlson6e01b202011-08-19 13:58:20 +00007201static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7202{
7203 if (enable) {
7204 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7205 MAC_MODE_PORT_MODE_MASK);
7206
7207 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7208
7209 if (!tg3_flag(tp, 5705_PLUS))
7210 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7211
7212 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7213 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7214 else
7215 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7216 } else {
7217 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7218
7219 if (tg3_flag(tp, 5705_PLUS) ||
7220 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7222 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7223 }
7224
7225 tw32(MAC_MODE, tp->mac_mode);
7226 udelay(40);
7227}
7228
/* Force the PHY into loopback at @speed.  When @extlpbk is set, the
 * loop is closed externally via tg3_phy_set_extloopbk() instead of
 * inside the PHY (BMCR_LOOPBACK).  Also reprograms MAC_MODE to match
 * the forced speed.  Returns 0 on success, -EIO if external loopback
 * setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	/* APD and auto-MDIX are disabled so they cannot disturb the
	 * forced link.
	 */
	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Build the forced BMCR value: always full duplex. */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs get forced to 100 Mb/s instead of 1000. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* External loopback needs this end to act as
			 * clock master.
			 */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* Force the transmit link/lock indications on 5785 FETs. */
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Point the MAC at the port mode matching the forced speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401 and 5411 PHYs want opposite polarity settings;
		 * NOTE(review): presumed chip-specific quirk — confirm
		 * against Broadcom errata if touched.
		 */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7321
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007322static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007323{
7324 struct tg3 *tp = netdev_priv(dev);
7325
7326 if (features & NETIF_F_LOOPBACK) {
7327 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7328 return;
7329
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007330 spin_lock_bh(&tp->lock);
Matt Carlson6e01b202011-08-19 13:58:20 +00007331 tg3_mac_loopback(tp, true);
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007332 netif_carrier_on(tp->dev);
7333 spin_unlock_bh(&tp->lock);
7334 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7335 } else {
7336 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7337 return;
7338
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007339 spin_lock_bh(&tp->lock);
Matt Carlson6e01b202011-08-19 13:58:20 +00007340 tg3_mac_loopback(tp, false);
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007341 /* Force link status check */
7342 tg3_setup_phy(tp, 1);
7343 spin_unlock_bh(&tp->lock);
7344 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7345 }
7346}
7347
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007348static netdev_features_t tg3_fix_features(struct net_device *dev,
7349 netdev_features_t features)
Michał Mirosławdc668912011-04-07 03:35:07 +00007350{
7351 struct tg3 *tp = netdev_priv(dev);
7352
Joe Perches63c3a662011-04-26 08:12:10 +00007353 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
Michał Mirosławdc668912011-04-07 03:35:07 +00007354 features &= ~NETIF_F_ALL_TSO;
7355
7356 return features;
7357}
7358
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007359static int tg3_set_features(struct net_device *dev, netdev_features_t features)
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007360{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007361 netdev_features_t changed = dev->features ^ features;
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007362
7363 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7364 tg3_set_loopback(dev, features);
7365
7366 return 0;
7367}
7368
Matt Carlson21f581a2009-08-28 14:00:25 +00007369static void tg3_rx_prodring_free(struct tg3 *tp,
7370 struct tg3_rx_prodring_set *tpr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007371{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007372 int i;
7373
Matt Carlson8fea32b2010-09-15 08:59:58 +00007374 if (tpr != &tp->napi[0].prodring) {
Matt Carlsonb196c7e2009-11-13 13:03:50 +00007375 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
Matt Carlson2c49a442010-09-30 10:34:35 +00007376 i = (i + 1) & tp->rx_std_ring_mask)
Eric Dumazet9205fd92011-11-18 06:47:01 +00007377 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
Matt Carlsonb196c7e2009-11-13 13:03:50 +00007378 tp->rx_pkt_map_sz);
7379
Joe Perches63c3a662011-04-26 08:12:10 +00007380 if (tg3_flag(tp, JUMBO_CAPABLE)) {
Matt Carlsonb196c7e2009-11-13 13:03:50 +00007381 for (i = tpr->rx_jmb_cons_idx;
7382 i != tpr->rx_jmb_prod_idx;
Matt Carlson2c49a442010-09-30 10:34:35 +00007383 i = (i + 1) & tp->rx_jmb_ring_mask) {
Eric Dumazet9205fd92011-11-18 06:47:01 +00007384 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
Matt Carlsonb196c7e2009-11-13 13:03:50 +00007385 TG3_RX_JMB_MAP_SZ);
7386 }
7387 }
7388
Matt Carlson2b2cdb62009-11-13 13:03:48 +00007389 return;
Matt Carlsonb196c7e2009-11-13 13:03:50 +00007390 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007391
Matt Carlson2c49a442010-09-30 10:34:35 +00007392 for (i = 0; i <= tp->rx_std_ring_mask; i++)
Eric Dumazet9205fd92011-11-18 06:47:01 +00007393 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
Matt Carlson2b2cdb62009-11-13 13:03:48 +00007394 tp->rx_pkt_map_sz);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007395
Joe Perches63c3a662011-04-26 08:12:10 +00007396 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
Matt Carlson2c49a442010-09-30 10:34:35 +00007397 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
Eric Dumazet9205fd92011-11-18 06:47:01 +00007398 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
Matt Carlson2b2cdb62009-11-13 13:03:48 +00007399 TG3_RX_JMB_MAP_SZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007400 }
7401}
7402
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success (possibly after shrinking tp->rx_pending /
 * tp->rx_jumbo_pending on partial allocation) or -ENOMEM if not even
 * one buffer could be allocated.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Per-vector shadow rings only need their bookkeeping arrays
	 * cleared; the descriptor setup below is for the hardware
	 * prodring (napi[0]) only.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips use the standard ring for jumbo-sized
	 * buffers when the MTU exceeds the standard frame size.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Run with the buffers we did get. */
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			/* Run with the buffers we did get. */
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7511
Matt Carlson21f581a2009-08-28 14:00:25 +00007512static void tg3_rx_prodring_fini(struct tg3 *tp,
7513 struct tg3_rx_prodring_set *tpr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007514{
Matt Carlson21f581a2009-08-28 14:00:25 +00007515 kfree(tpr->rx_std_buffers);
7516 tpr->rx_std_buffers = NULL;
7517 kfree(tpr->rx_jmb_buffers);
7518 tpr->rx_jmb_buffers = NULL;
7519 if (tpr->rx_std) {
Matt Carlson4bae65c2010-11-24 08:31:52 +00007520 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7521 tpr->rx_std, tpr->rx_std_mapping);
Matt Carlson21f581a2009-08-28 14:00:25 +00007522 tpr->rx_std = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007523 }
Matt Carlson21f581a2009-08-28 14:00:25 +00007524 if (tpr->rx_jmb) {
Matt Carlson4bae65c2010-11-24 08:31:52 +00007525 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7526 tpr->rx_jmb, tpr->rx_jmb_mapping);
Matt Carlson21f581a2009-08-28 14:00:25 +00007527 tpr->rx_jmb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007528 }
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007529}
7530
Matt Carlson21f581a2009-08-28 14:00:25 +00007531static int tg3_rx_prodring_init(struct tg3 *tp,
7532 struct tg3_rx_prodring_set *tpr)
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007533{
Matt Carlson2c49a442010-09-30 10:34:35 +00007534 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7535 GFP_KERNEL);
Matt Carlson21f581a2009-08-28 14:00:25 +00007536 if (!tpr->rx_std_buffers)
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007537 return -ENOMEM;
7538
Matt Carlson4bae65c2010-11-24 08:31:52 +00007539 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7540 TG3_RX_STD_RING_BYTES(tp),
7541 &tpr->rx_std_mapping,
7542 GFP_KERNEL);
Matt Carlson21f581a2009-08-28 14:00:25 +00007543 if (!tpr->rx_std)
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007544 goto err_out;
7545
Joe Perches63c3a662011-04-26 08:12:10 +00007546 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
Matt Carlson2c49a442010-09-30 10:34:35 +00007547 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
Matt Carlson21f581a2009-08-28 14:00:25 +00007548 GFP_KERNEL);
7549 if (!tpr->rx_jmb_buffers)
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007550 goto err_out;
7551
Matt Carlson4bae65c2010-11-24 08:31:52 +00007552 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7553 TG3_RX_JMB_RING_BYTES(tp),
7554 &tpr->rx_jmb_mapping,
7555 GFP_KERNEL);
Matt Carlson21f581a2009-08-28 14:00:25 +00007556 if (!tpr->rx_jmb)
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007557 goto err_out;
7558 }
7559
7560 return 0;
7561
7562err_out:
Matt Carlson21f581a2009-08-28 14:00:25 +00007563 tg3_rx_prodring_fini(tp, tpr);
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007564 return -ENOMEM;
7565}
7566
7567/* Free up pending packets in all rx/tx rings.
7568 *
7569 * The chip has been shut down and the driver detached from
7570 * the networking, so no interrupts or new tx packets will
7571 * end up in the driver. tp->{tx,}lock is not held and we are not
7572 * in an interrupt context and thus may sleep.
7573 */
7574static void tg3_free_rings(struct tg3 *tp)
7575{
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007576 int i, j;
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007577
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007578 for (j = 0; j < tp->irq_cnt; j++) {
7579 struct tg3_napi *tnapi = &tp->napi[j];
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007580
Matt Carlson8fea32b2010-09-15 08:59:58 +00007581 tg3_rx_prodring_free(tp, &tnapi->prodring);
Matt Carlsonb28f6422010-06-05 17:24:32 +00007582
Matt Carlson0c1d0e22009-09-01 13:16:33 +00007583 if (!tnapi->tx_buffers)
7584 continue;
7585
Matt Carlson0d681b22011-07-27 14:20:49 +00007586 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7587 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007588
Matt Carlson0d681b22011-07-27 14:20:49 +00007589 if (!skb)
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007590 continue;
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007591
Matt Carlsonba1142e2011-11-04 09:15:00 +00007592 tg3_tx_skb_unmap(tnapi, i,
7593 skb_shinfo(skb)->nr_frags - 1);
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007594
7595 dev_kfree_skb_any(skb);
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007596 }
Tom Herbert5cb917b2012-03-05 19:53:50 +00007597 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
Matt Carlson2b2cdb62009-11-13 13:03:48 +00007598 }
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007599}
7600
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM (with all rings freed) if buffer
 * allocation fails.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* Reset interrupt tag bookkeeping and clear the
		 * hardware status block for this vector.
		 */
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		/* Reset tx producer/consumer and zero the tx ring,
		 * if this vector has one.
		 */
		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		/* Same for the rx return ring. */
		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			/* Unwind everything allocated so far. */
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
7641
Michael Chan49a359e2012-09-28 07:12:37 +00007642static void tg3_mem_tx_release(struct tg3 *tp)
7643{
7644 int i;
7645
7646 for (i = 0; i < tp->irq_max; i++) {
7647 struct tg3_napi *tnapi = &tp->napi[i];
7648
7649 if (tnapi->tx_ring) {
7650 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7651 tnapi->tx_ring, tnapi->tx_desc_mapping);
7652 tnapi->tx_ring = NULL;
7653 }
7654
7655 kfree(tnapi->tx_buffers);
7656 tnapi->tx_buffers = NULL;
7657 }
7658}
7659
7660static int tg3_mem_tx_acquire(struct tg3 *tp)
7661{
7662 int i;
7663 struct tg3_napi *tnapi = &tp->napi[0];
7664
7665 /* If multivector TSS is enabled, vector 0 does not handle
7666 * tx interrupts. Don't allocate any resources for it.
7667 */
7668 if (tg3_flag(tp, ENABLE_TSS))
7669 tnapi++;
7670
7671 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7672 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7673 TG3_TX_RING_SIZE, GFP_KERNEL);
7674 if (!tnapi->tx_buffers)
7675 goto err_out;
7676
7677 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7678 TG3_TX_RING_BYTES,
7679 &tnapi->tx_desc_mapping,
7680 GFP_KERNEL);
7681 if (!tnapi->tx_ring)
7682 goto err_out;
7683 }
7684
7685 return 0;
7686
7687err_out:
7688 tg3_mem_tx_release(tp);
7689 return -ENOMEM;
7690}
7691
7692static void tg3_mem_rx_release(struct tg3 *tp)
7693{
7694 int i;
7695
7696 for (i = 0; i < tp->irq_max; i++) {
7697 struct tg3_napi *tnapi = &tp->napi[i];
7698
7699 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7700
7701 if (!tnapi->rx_rcb)
7702 continue;
7703
7704 dma_free_coherent(&tp->pdev->dev,
7705 TG3_RX_RCB_RING_BYTES(tp),
7706 tnapi->rx_rcb,
7707 tnapi->rx_rcb_mapping);
7708 tnapi->rx_rcb = NULL;
7709 }
7710}
7711
7712static int tg3_mem_rx_acquire(struct tg3 *tp)
7713{
7714 unsigned int i, limit;
7715
7716 limit = tp->rxq_cnt;
7717
7718 /* If RSS is enabled, we need a (dummy) producer ring
7719 * set on vector zero. This is the true hw prodring.
7720 */
7721 if (tg3_flag(tp, ENABLE_RSS))
7722 limit++;
7723
7724 for (i = 0; i < limit; i++) {
7725 struct tg3_napi *tnapi = &tp->napi[i];
7726
7727 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7728 goto err_out;
7729
7730 /* If multivector RSS is enabled, vector 0
7731 * does not handle rx or tx interrupts.
7732 * Don't allocate any resources for it.
7733 */
7734 if (!i && tg3_flag(tp, ENABLE_RSS))
7735 continue;
7736
7737 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7738 TG3_RX_RCB_RING_BYTES(tp),
7739 &tnapi->rx_rcb_mapping,
7740 GFP_KERNEL);
7741 if (!tnapi->rx_rcb)
7742 goto err_out;
7743
7744 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7745 }
7746
7747 return 0;
7748
7749err_out:
7750 tg3_mem_rx_release(tp);
7751 return -ENOMEM;
7752}
7753
Matt Carlsoncf7a7292009-08-28 13:59:57 +00007754/*
7755 * Must not be invoked with interrupt sources disabled and
7756 * the hardware shutdown down.
7757 */
7758static void tg3_free_consistent(struct tg3 *tp)
7759{
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007760 int i;
Matt Carlson898a56f2009-08-28 14:02:40 +00007761
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007762 for (i = 0; i < tp->irq_cnt; i++) {
7763 struct tg3_napi *tnapi = &tp->napi[i];
7764
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007765 if (tnapi->hw_status) {
Matt Carlson4bae65c2010-11-24 08:31:52 +00007766 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7767 tnapi->hw_status,
7768 tnapi->status_mapping);
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007769 tnapi->hw_status = NULL;
7770 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007771 }
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007772
Michael Chan49a359e2012-09-28 07:12:37 +00007773 tg3_mem_rx_release(tp);
7774 tg3_mem_tx_release(tp);
7775
Linus Torvalds1da177e2005-04-16 15:20:36 -07007776 if (tp->hw_stats) {
Matt Carlson4bae65c2010-11-24 08:31:52 +00007777 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7778 tp->hw_stats, tp->stats_mapping);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007779 tp->hw_stats = NULL;
7780 }
7781}
7782
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the DMA-shared statistics block, one status block per
 * interrupt vector, and all TX/RX ring memory.  On any failure,
 * everything acquired so far is released and -ENOMEM is returned.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	/* Hardware statistics block, shared with the device via DMA. */
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		/* Per-vector status block the chip DMAs events into. */
		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			/* Stays NULL for vector 0 (and any index > 4),
			 * which handles no rx return ring under RSS.
			 */
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	/* Ring memory last; helpers clean up after themselves, and
	 * err_out below releases the status/stats blocks.
	 */
	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
7852
7853#define MAX_WAIT_CNT 1000
7854
7855/* To stop a block, clear the enable bit and poll till it
7856 * clears. tp->lock is held.
7857 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07007858static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007859{
7860 unsigned int i;
7861 u32 val;
7862
Joe Perches63c3a662011-04-26 08:12:10 +00007863 if (tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007864 switch (ofs) {
7865 case RCVLSC_MODE:
7866 case DMAC_MODE:
7867 case MBFREE_MODE:
7868 case BUFMGR_MODE:
7869 case MEMARB_MODE:
7870 /* We can't enable/disable these bits of the
7871 * 5705/5750, just say success.
7872 */
7873 return 0;
7874
7875 default:
7876 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007877 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007878 }
7879
7880 val = tr32(ofs);
7881 val &= ~enable_bit;
7882 tw32_f(ofs, val);
7883
7884 for (i = 0; i < MAX_WAIT_CNT; i++) {
7885 udelay(100);
7886 val = tr32(ofs);
7887 if ((val & enable_bit) == 0)
7888 break;
7889 }
7890
David S. Millerb3b7d6b2005-05-05 14:40:20 -07007891 if (i == MAX_WAIT_CNT && !silent) {
Matt Carlson2445e462010-04-05 10:19:21 +00007892 dev_err(&tp->pdev->dev,
7893 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7894 ofs, enable_bit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007895 return -ENODEV;
7896 }
7897
7898 return 0;
7899}
7900
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, stop the receive MAC, then
 * walk the receive, transmit, and housekeeping blocks, clearing each
 * block's enable bit and polling until it stops (tg3_stop_block()).
 * Returns 0 on success, nonzero if any block failed to stop; with
 * @silent set, individual timeouts are not logged.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the receive MAC first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path blocks.  Errors are OR-ed together so every
	 * block is attempted even if an earlier one timed out.
	 */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Transmit-path blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the transmit MAC and poll until it quiesces. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register: assert then deassert. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear every status block so stale events are not reprocessed. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
7964
/* Save PCI command register before chip reset.
 *
 * The GRC_MISC_CFG core clock reset clears the memory enable bit in
 * the PCI command register on some chips (see tg3_chip_reset()); the
 * cached value is written back by tg3_restore_pci_state().
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7970
/* Restore PCI state after chip reset.
 *
 * Re-establishes host control, PCI state, the saved command register,
 * cacheline/latency settings (non-PCIe only), PCI-X ordering, and the
 * MSI enable bit on 5780-class parts.  Write order mirrors the reset
 * path and should not be changed casually.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Write back the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8031
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip, then bring the PCI
 * configuration, memory arbiter, MAC mode, and ASF state back up.
 * The sequence is strictly ordered around hardware errata; returns 0
 * on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Wait out any in-flight irq handlers before resetting. */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			/* NOTE(review): 0xc4 is an undocumented PCIe
			 * config workaround register on 5750_A0.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter, preserving extra mode bits on
	 * 5780-class parts.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		/* NOTE(review): undocumented 5750_A3 workaround write. */
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* NOTE(review): undocumented 5705_A0 workaround register. */
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode from the PHY flags. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		/* NOTE(review): undocumented PCIe workaround register. */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
8268
Matt Carlson65ec6982012-02-28 23:33:37 +00008269static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8270static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
Matt Carlson92feeab2011-12-08 14:40:14 +00008271
Linus Torvalds1da177e2005-04-16 15:20:36 -07008272/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07008273static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008274{
8275 int err;
8276
8277 tg3_stop_fw(tp);
8278
Michael Chan944d9802005-05-29 14:57:48 -07008279 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008280
David S. Millerb3b7d6b2005-05-05 14:40:20 -07008281 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008282 err = tg3_chip_reset(tp);
8283
Matt Carlsondaba2a62009-04-20 06:58:52 +00008284 __tg3_set_mac_addr(tp, 0);
8285
Michael Chan944d9802005-05-29 14:57:48 -07008286 tg3_write_sig_legacy(tp, kind);
8287 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008288
Matt Carlson92feeab2011-12-08 14:40:14 +00008289 if (tp->hw_stats) {
8290 /* Save the stats across chip resets... */
David S. Millerb4017c52012-03-01 17:57:40 -05008291 tg3_get_nstats(tp, &tp->net_stats_prev);
Matt Carlson92feeab2011-12-08 14:40:14 +00008292 tg3_get_estats(tp, &tp->estats_prev);
8293
8294 /* And make sure the next sample is new data */
8295 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8296 }
8297
Linus Torvalds1da177e2005-04-16 15:20:36 -07008298 if (err)
8299 return err;
8300
8301 return 0;
8302}
8303
Linus Torvalds1da177e2005-04-16 15:20:36 -07008304static int tg3_set_mac_addr(struct net_device *dev, void *p)
8305{
8306 struct tg3 *tp = netdev_priv(dev);
8307 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07008308 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008309
Michael Chanf9804dd2005-09-27 12:13:10 -07008310 if (!is_valid_ether_addr(addr->sa_data))
Danny Kukawka504f9b52012-02-21 02:07:49 +00008311 return -EADDRNOTAVAIL;
Michael Chanf9804dd2005-09-27 12:13:10 -07008312
Linus Torvalds1da177e2005-04-16 15:20:36 -07008313 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8314
Michael Chane75f7c92006-03-20 21:33:26 -08008315 if (!netif_running(dev))
8316 return 0;
8317
Joe Perches63c3a662011-04-26 08:12:10 +00008318 if (tg3_flag(tp, ENABLE_ASF)) {
Michael Chan986e0ae2007-05-05 12:10:20 -07008319 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07008320
Michael Chan986e0ae2007-05-05 12:10:20 -07008321 addr0_high = tr32(MAC_ADDR_0_HIGH);
8322 addr0_low = tr32(MAC_ADDR_0_LOW);
8323 addr1_high = tr32(MAC_ADDR_1_HIGH);
8324 addr1_low = tr32(MAC_ADDR_1_LOW);
8325
8326 /* Skip MAC addr 1 if ASF is using it. */
8327 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8328 !(addr1_high == 0 && addr1_low == 0))
8329 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07008330 }
Michael Chan986e0ae2007-05-05 12:10:20 -07008331 spin_lock_bh(&tp->lock);
8332 __tg3_set_mac_addr(tp, skip_mac_1);
8333 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008334
Michael Chanb9ec6c12006-07-25 16:37:27 -07008335 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008336}
8337
8338/* tp->lock is held. */
8339static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8340 dma_addr_t mapping, u32 maxlen_flags,
8341 u32 nic_addr)
8342{
8343 tg3_write_mem(tp,
8344 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8345 ((u64) mapping >> 32));
8346 tg3_write_mem(tp,
8347 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8348 ((u64) mapping & 0xffffffff));
8349 tg3_write_mem(tp,
8350 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8351 maxlen_flags);
8352
Joe Perches63c3a662011-04-26 08:12:10 +00008353 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008354 tg3_write_mem(tp,
8355 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8356 nic_addr);
8357}
8358
Michael Chana489b6d2012-09-28 07:12:39 +00008359
8360static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07008361{
Michael Chana489b6d2012-09-28 07:12:39 +00008362 int i = 0;
Matt Carlsonb6080e12009-09-01 13:12:00 +00008363
Joe Perches63c3a662011-04-26 08:12:10 +00008364 if (!tg3_flag(tp, ENABLE_TSS)) {
Matt Carlsonb6080e12009-09-01 13:12:00 +00008365 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8366 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8367 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
Matt Carlsonb6080e12009-09-01 13:12:00 +00008368 } else {
8369 tw32(HOSTCC_TXCOL_TICKS, 0);
8370 tw32(HOSTCC_TXMAX_FRAMES, 0);
8371 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
Michael Chana489b6d2012-09-28 07:12:39 +00008372
8373 for (; i < tp->txq_cnt; i++) {
8374 u32 reg;
8375
8376 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8377 tw32(reg, ec->tx_coalesce_usecs);
8378 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8379 tw32(reg, ec->tx_max_coalesced_frames);
8380 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8381 tw32(reg, ec->tx_max_coalesced_frames_irq);
8382 }
Matt Carlson19cfaec2009-12-03 08:36:20 +00008383 }
Matt Carlsonb6080e12009-09-01 13:12:00 +00008384
Michael Chana489b6d2012-09-28 07:12:39 +00008385 for (; i < tp->irq_max - 1; i++) {
8386 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8387 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8388 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8389 }
8390}
8391
8392static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8393{
8394 int i = 0;
8395 u32 limit = tp->rxq_cnt;
8396
Joe Perches63c3a662011-04-26 08:12:10 +00008397 if (!tg3_flag(tp, ENABLE_RSS)) {
Matt Carlson19cfaec2009-12-03 08:36:20 +00008398 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8399 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8400 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
Michael Chana489b6d2012-09-28 07:12:39 +00008401 limit--;
Matt Carlson19cfaec2009-12-03 08:36:20 +00008402 } else {
Matt Carlsonb6080e12009-09-01 13:12:00 +00008403 tw32(HOSTCC_RXCOL_TICKS, 0);
8404 tw32(HOSTCC_RXMAX_FRAMES, 0);
8405 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
David S. Miller15f98502005-05-18 22:49:26 -07008406 }
Matt Carlsonb6080e12009-09-01 13:12:00 +00008407
Michael Chana489b6d2012-09-28 07:12:39 +00008408 for (; i < limit; i++) {
8409 u32 reg;
8410
8411 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8412 tw32(reg, ec->rx_coalesce_usecs);
8413 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8414 tw32(reg, ec->rx_max_coalesced_frames);
8415 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8416 tw32(reg, ec->rx_max_coalesced_frames_irq);
8417 }
8418
8419 for (; i < tp->irq_max - 1; i++) {
8420 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8421 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8422 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8423 }
8424}
8425
8426static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8427{
8428 tg3_coal_tx_init(tp, ec);
8429 tg3_coal_rx_init(tp, ec);
8430
Joe Perches63c3a662011-04-26 08:12:10 +00008431 if (!tg3_flag(tp, 5705_PLUS)) {
David S. Miller15f98502005-05-18 22:49:26 -07008432 u32 val = ec->stats_block_coalesce_usecs;
8433
Matt Carlsonb6080e12009-09-01 13:12:00 +00008434 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8435 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8436
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +00008437 if (!tp->link_up)
David S. Miller15f98502005-05-18 22:49:26 -07008438 val = 0;
8439
8440 tw32(HOSTCC_STAT_COAL_TICKS, val);
8441 }
8442}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008443
8444/* tp->lock is held. */
/* Reset all TX/RX ring state to a known baseline: disable every send
 * and receive-return ring except the first, quiesce and re-arm the
 * interrupt mailboxes, clear the host status blocks, and reprogram the
 * status block DMA addresses and BD info blocks for each active NAPI
 * vector.  Caller holds tp->lock (see comment above).
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first.  The number of send
	 * ring control blocks present differs per chip family, hence the
	 * flag-dependent upper limit.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first.  Again the ring
	 * count is chip-family dependent.
	 */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts (writing 1 to the interrupt mailbox masks the
	 * vector) and clear the per-vector bookkeeping for vector 0.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			/* Producer mailboxes for vectors > 0 only exist
			 * when TSS (multiple TX rings) is enabled.
			 */
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the single TX producer mbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Program the BD info block for vector 0's TX ring, if present. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	/* Program the BD info block for vector 0's RX return ring. */
	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Repeat status block + BD info programming for each additional
	 * interrupt vector; the per-vector status block registers are 8
	 * bytes apart.
	 */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8571
/* Program the receive BD replenish thresholds.  The on-chip BD cache
 * size depends on the ASIC generation; the host-side threshold is
 * derived from the configured ring sizes (rx_pending / rx_jumbo_pending).
 * The lower of the NIC-side and host-side thresholds is written to the
 * hardware for the standard ring, and similarly for the jumbo ring on
 * jumbo-capable, non-5780-class parts.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Select the standard-ring BD cache size for this chip family. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	/* NIC-side threshold: half the BD cache, capped by rx_std_max_post.
	 * Host-side threshold: 1/8 of the configured ring, at least 1.
	 */
	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* Jumbo thresholds only apply to jumbo-capable, non-5780 parts. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
8610
Matt Carlsonccd5ba92012-02-13 10:20:08 +00008611static inline u32 calc_crc(unsigned char *buf, int len)
8612{
8613 u32 reg;
8614 u32 tmp;
8615 int j, k;
8616
8617 reg = 0xffffffff;
8618
8619 for (j = 0; j < len; j++) {
8620 reg ^= buf[j];
8621
8622 for (k = 0; k < 8; k++) {
8623 tmp = reg & 0x01;
8624
8625 reg >>= 1;
8626
8627 if (tmp)
8628 reg ^= 0xedb88320;
8629 }
8630 }
8631
8632 return ~reg;
8633}
8634
8635static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8636{
8637 /* accept or reject all multicast frames */
8638 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8639 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8640 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8641 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8642}
8643
8644static void __tg3_set_rx_mode(struct net_device *dev)
8645{
8646 struct tg3 *tp = netdev_priv(dev);
8647 u32 rx_mode;
8648
8649 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8650 RX_MODE_KEEP_VLAN_TAG);
8651
8652#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8653 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8654 * flag clear.
8655 */
8656 if (!tg3_flag(tp, ENABLE_ASF))
8657 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8658#endif
8659
8660 if (dev->flags & IFF_PROMISC) {
8661 /* Promiscuous mode. */
8662 rx_mode |= RX_MODE_PROMISC;
8663 } else if (dev->flags & IFF_ALLMULTI) {
8664 /* Accept all multicast. */
8665 tg3_set_multi(tp, 1);
8666 } else if (netdev_mc_empty(dev)) {
8667 /* Reject all multicast. */
8668 tg3_set_multi(tp, 0);
8669 } else {
8670 /* Accept one or more multicast(s). */
8671 struct netdev_hw_addr *ha;
8672 u32 mc_filter[4] = { 0, };
8673 u32 regidx;
8674 u32 bit;
8675 u32 crc;
8676
8677 netdev_for_each_mc_addr(ha, dev) {
8678 crc = calc_crc(ha->addr, ETH_ALEN);
8679 bit = ~crc & 0x7f;
8680 regidx = (bit & 0x60) >> 5;
8681 bit &= 0x1f;
8682 mc_filter[regidx] |= (1 << bit);
8683 }
8684
8685 tw32(MAC_HASH_REG_0, mc_filter[0]);
8686 tw32(MAC_HASH_REG_1, mc_filter[1]);
8687 tw32(MAC_HASH_REG_2, mc_filter[2]);
8688 tw32(MAC_HASH_REG_3, mc_filter[3]);
8689 }
8690
8691 if (rx_mode != tp->rx_mode) {
8692 tp->rx_mode = rx_mode;
8693 tw32_f(MAC_RX_MODE, rx_mode);
8694 udelay(10);
8695 }
8696}
8697
Michael Chan91024262012-09-28 07:12:38 +00008698static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
Matt Carlson90415472011-12-16 13:33:23 +00008699{
8700 int i;
8701
8702 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
Michael Chan91024262012-09-28 07:12:38 +00008703 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
Matt Carlson90415472011-12-16 13:33:23 +00008704}
8705
8706static void tg3_rss_check_indir_tbl(struct tg3 *tp)
Matt Carlsonbcebcc42011-12-14 11:10:01 +00008707{
8708 int i;
8709
8710 if (!tg3_flag(tp, SUPPORT_MSIX))
8711 return;
8712
Michael Chan0b3ba052012-11-14 14:44:29 +00008713 if (tp->rxq_cnt == 1) {
Matt Carlsonbcebcc42011-12-14 11:10:01 +00008714 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
Matt Carlson90415472011-12-16 13:33:23 +00008715 return;
8716 }
8717
8718 /* Validate table against current IRQ count */
8719 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
Michael Chan0b3ba052012-11-14 14:44:29 +00008720 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
Matt Carlson90415472011-12-16 13:33:23 +00008721 break;
8722 }
8723
8724 if (i != TG3_RSS_INDIR_TBL_SIZE)
Michael Chan91024262012-09-28 07:12:38 +00008725 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
Matt Carlsonbcebcc42011-12-14 11:10:01 +00008726}
8727
Matt Carlson90415472011-12-16 13:33:23 +00008728static void tg3_rss_write_indir_tbl(struct tg3 *tp)
Matt Carlsonbcebcc42011-12-14 11:10:01 +00008729{
8730 int i = 0;
8731 u32 reg = MAC_RSS_INDIR_TBL_0;
8732
8733 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8734 u32 val = tp->rss_ind_tbl[i];
8735 i++;
8736 for (; i % 8; i++) {
8737 val <<= 4;
8738 val |= tp->rss_ind_tbl[i];
8739 }
8740 tw32(reg, val);
8741 reg += 4;
8742 }
8743}
8744
Matt Carlson2d31eca2009-09-01 12:53:31 +00008745/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008746static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008747{
8748 u32 val, rdmac_mode;
8749 int i, err, limit;
Matt Carlson8fea32b2010-09-15 08:59:58 +00008750 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008751
8752 tg3_disable_ints(tp);
8753
8754 tg3_stop_fw(tp);
8755
8756 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8757
Joe Perches63c3a662011-04-26 08:12:10 +00008758 if (tg3_flag(tp, INIT_COMPLETE))
Michael Chane6de8ad2005-05-05 14:42:41 -07008759 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008760
Matt Carlson699c0192010-12-06 08:28:51 +00008761 /* Enable MAC control of LPI */
8762 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8763 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8764 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8765 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8766
8767 tw32_f(TG3_CPMU_EEE_CTRL,
8768 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8769
Matt Carlsona386b902010-12-06 08:28:53 +00008770 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8771 TG3_CPMU_EEEMD_LPI_IN_TX |
8772 TG3_CPMU_EEEMD_LPI_IN_RX |
8773 TG3_CPMU_EEEMD_EEE_ENABLE;
8774
8775 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8776 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8777
Joe Perches63c3a662011-04-26 08:12:10 +00008778 if (tg3_flag(tp, ENABLE_APE))
Matt Carlsona386b902010-12-06 08:28:53 +00008779 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8780
8781 tw32_f(TG3_CPMU_EEE_MODE, val);
8782
8783 tw32_f(TG3_CPMU_EEE_DBTMR1,
8784 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8785 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8786
8787 tw32_f(TG3_CPMU_EEE_DBTMR2,
Matt Carlsond7f2ab22011-01-25 15:58:56 +00008788 TG3_CPMU_DBTMR2_APE_TX_2047US |
Matt Carlsona386b902010-12-06 08:28:53 +00008789 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
Matt Carlson699c0192010-12-06 08:28:51 +00008790 }
8791
Matt Carlson603f1172010-02-12 14:47:10 +00008792 if (reset_phy)
Michael Chand4d2c552006-03-20 17:47:20 -08008793 tg3_phy_reset(tp);
8794
Linus Torvalds1da177e2005-04-16 15:20:36 -07008795 err = tg3_chip_reset(tp);
8796 if (err)
8797 return err;
8798
8799 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8800
Matt Carlsonbcb37f62008-11-03 16:52:09 -08008801 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07008802 val = tr32(TG3_CPMU_CTRL);
8803 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8804 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08008805
8806 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8807 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8808 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8809 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8810
8811 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8812 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8813 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8814 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8815
8816 val = tr32(TG3_CPMU_HST_ACC);
8817 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8818 val |= CPMU_HST_ACC_MACCLK_6_25;
8819 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07008820 }
8821
Matt Carlson33466d932009-04-20 06:57:41 +00008822 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8823 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8824 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8825 PCIE_PWR_MGMT_L1_THRESH_4MS;
8826 tw32(PCIE_PWR_MGMT_THRESH, val);
Matt Carlson521e6b92009-08-25 10:06:01 +00008827
8828 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8829 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8830
8831 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
Matt Carlson33466d932009-04-20 06:57:41 +00008832
Matt Carlsonf40386c2009-11-02 14:24:02 +00008833 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8834 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
Matt Carlson255ca312009-08-25 10:07:27 +00008835 }
8836
Joe Perches63c3a662011-04-26 08:12:10 +00008837 if (tg3_flag(tp, L1PLLPD_EN)) {
Matt Carlson614b0592010-01-20 16:58:02 +00008838 u32 grc_mode = tr32(GRC_MODE);
8839
8840 /* Access the lower 1K of PL PCIE block registers. */
8841 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8842 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8843
8844 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8845 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8846 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8847
8848 tw32(GRC_MODE, grc_mode);
8849 }
8850
Matt Carlson55086ad2011-12-14 11:09:59 +00008851 if (tg3_flag(tp, 57765_CLASS)) {
Matt Carlson5093eed2010-11-24 08:31:45 +00008852 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8853 u32 grc_mode = tr32(GRC_MODE);
Matt Carlsoncea46462010-04-12 06:58:24 +00008854
Matt Carlson5093eed2010-11-24 08:31:45 +00008855 /* Access the lower 1K of PL PCIE block registers. */
8856 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8857 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
Matt Carlsoncea46462010-04-12 06:58:24 +00008858
Matt Carlson5093eed2010-11-24 08:31:45 +00008859 val = tr32(TG3_PCIE_TLDLPL_PORT +
8860 TG3_PCIE_PL_LO_PHYCTL5);
8861 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8862 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
Matt Carlsoncea46462010-04-12 06:58:24 +00008863
Matt Carlson5093eed2010-11-24 08:31:45 +00008864 tw32(GRC_MODE, grc_mode);
8865 }
Matt Carlsona977dbe2010-04-12 06:58:26 +00008866
Matt Carlson1ff30a52011-05-19 12:12:46 +00008867 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8868 u32 grc_mode = tr32(GRC_MODE);
8869
8870 /* Access the lower 1K of DL PCIE block registers. */
8871 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8872 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8873
8874 val = tr32(TG3_PCIE_TLDLPL_PORT +
8875 TG3_PCIE_DL_LO_FTSMAX);
8876 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8877 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8878 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8879
8880 tw32(GRC_MODE, grc_mode);
8881 }
8882
Matt Carlsona977dbe2010-04-12 06:58:26 +00008883 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8884 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8885 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8886 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
Matt Carlsoncea46462010-04-12 06:58:24 +00008887 }
8888
Linus Torvalds1da177e2005-04-16 15:20:36 -07008889 /* This works around an issue with Athlon chipsets on
8890 * B3 tigon3 silicon. This bit has no effect on any
8891 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07008892 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008893 */
Joe Perches63c3a662011-04-26 08:12:10 +00008894 if (!tg3_flag(tp, CPMU_PRESENT)) {
8895 if (!tg3_flag(tp, PCI_EXPRESS))
Matt Carlson795d01c2007-10-07 23:28:17 -07008896 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8897 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8898 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008899
8900 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
Joe Perches63c3a662011-04-26 08:12:10 +00008901 tg3_flag(tp, PCIX_MODE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008902 val = tr32(TG3PCI_PCISTATE);
8903 val |= PCISTATE_RETRY_SAME_DMA;
8904 tw32(TG3PCI_PCISTATE, val);
8905 }
8906
Joe Perches63c3a662011-04-26 08:12:10 +00008907 if (tg3_flag(tp, ENABLE_APE)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -07008908 /* Allow reads and writes to the
8909 * APE register and memory space.
8910 */
8911 val = tr32(TG3PCI_PCISTATE);
8912 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
Matt Carlsonf92d9dc12010-06-05 17:24:30 +00008913 PCISTATE_ALLOW_APE_SHMEM_WR |
8914 PCISTATE_ALLOW_APE_PSPACE_WR;
Matt Carlson0d3031d2007-10-10 18:02:43 -07008915 tw32(TG3PCI_PCISTATE, val);
8916 }
8917
Linus Torvalds1da177e2005-04-16 15:20:36 -07008918 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8919 /* Enable some hw fixes. */
8920 val = tr32(TG3PCI_MSI_DATA);
8921 val |= (1 << 26) | (1 << 28) | (1 << 29);
8922 tw32(TG3PCI_MSI_DATA, val);
8923 }
8924
8925 /* Descriptor ring init may make accesses to the
8926 * NIC SRAM area to setup the TX descriptors, so we
8927 * can only do this after the hardware has been
8928 * successfully reset.
8929 */
Michael Chan32d8c572006-07-25 16:38:29 -07008930 err = tg3_init_rings(tp);
8931 if (err)
8932 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008933
Joe Perches63c3a662011-04-26 08:12:10 +00008934 if (tg3_flag(tp, 57765_PLUS)) {
Matt Carlsoncbf9ca62009-11-13 13:03:40 +00008935 val = tr32(TG3PCI_DMA_RW_CTRL) &
8936 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
Matt Carlson1a319022010-04-12 06:58:25 +00008937 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8938 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
Matt Carlson55086ad2011-12-14 11:09:59 +00008939 if (!tg3_flag(tp, 57765_CLASS) &&
Matt Carlson0aebff42011-04-25 12:42:45 +00008940 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8941 val |= DMA_RWCTRL_TAGGED_STAT_WA;
Matt Carlsoncbf9ca62009-11-13 13:03:40 +00008942 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8943 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8944 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07008945 /* This value is determined during the probe time DMA
8946 * engine test, tg3_test_dma.
8947 */
8948 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8949 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008950
8951 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8952 GRC_MODE_4X_NIC_SEND_RINGS |
8953 GRC_MODE_NO_TX_PHDR_CSUM |
8954 GRC_MODE_NO_RX_PHDR_CSUM);
8955 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07008956
8957 /* Pseudo-header checksum is done by hardware logic and not
8958 * the offload processers, so make the chip do the pseudo-
8959 * header checksums on receive. For transmit it is more
8960 * convenient to do the pseudo-header checksum in software
8961 * as Linux does that on transmit for us in all cases.
8962 */
8963 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008964
8965 tw32(GRC_MODE,
8966 tp->grc_mode |
8967 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8968
8969 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8970 val = tr32(GRC_MISC_CFG);
8971 val &= ~0xff;
8972 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8973 tw32(GRC_MISC_CFG, val);
8974
8975 /* Initialize MBUF/DESC pool. */
Joe Perches63c3a662011-04-26 08:12:10 +00008976 if (tg3_flag(tp, 5750_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008977 /* Do nothing. */
8978 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8979 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8981 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8982 else
8983 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8984 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8985 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
Joe Perches63c3a662011-04-26 08:12:10 +00008986 } else if (tg3_flag(tp, TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008987 int fw_len;
8988
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -08008989 fw_len = tp->fw_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008990 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8991 tw32(BUFMGR_MB_POOL_ADDR,
8992 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8993 tw32(BUFMGR_MB_POOL_SIZE,
8994 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8995 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008996
Michael Chan0f893dc2005-07-25 12:30:38 -07008997 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008998 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8999 tp->bufmgr_config.mbuf_read_dma_low_water);
9000 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9001 tp->bufmgr_config.mbuf_mac_rx_low_water);
9002 tw32(BUFMGR_MB_HIGH_WATER,
9003 tp->bufmgr_config.mbuf_high_water);
9004 } else {
9005 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9006 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9007 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9008 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9009 tw32(BUFMGR_MB_HIGH_WATER,
9010 tp->bufmgr_config.mbuf_high_water_jumbo);
9011 }
9012 tw32(BUFMGR_DMA_LOW_WATER,
9013 tp->bufmgr_config.dma_low_water);
9014 tw32(BUFMGR_DMA_HIGH_WATER,
9015 tp->bufmgr_config.dma_high_water);
9016
Matt Carlsond309a462010-09-30 10:34:31 +00009017 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9019 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
Matt Carlson4d958472011-04-20 07:57:35 +00009020 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9021 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9022 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9023 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
Matt Carlsond309a462010-09-30 10:34:31 +00009024 tw32(BUFMGR_MODE, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009025 for (i = 0; i < 2000; i++) {
9026 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9027 break;
9028 udelay(10);
9029 }
9030 if (i >= 2000) {
Joe Perches05dbe002010-02-17 19:44:19 +00009031 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009032 return -ENODEV;
9033 }
9034
Matt Carlsoneb07a942011-04-20 07:57:36 +00009035 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9036 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
Michael Chanb5d37722006-09-27 16:06:21 -07009037
Matt Carlsoneb07a942011-04-20 07:57:36 +00009038 tg3_setup_rxbd_thresholds(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009039
9040 /* Initialize TG3_BDINFO's at:
9041 * RCVDBDI_STD_BD: standard eth size rx ring
9042 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9043 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9044 *
9045 * like so:
9046 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9047 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9048 * ring attribute flags
9049 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9050 *
9051 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9052 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9053 *
9054 * The size of each ring is fixed in the firmware, but the location is
9055 * configurable.
9056 */
9057 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
Matt Carlson21f581a2009-08-28 14:00:25 +00009058 ((u64) tpr->rx_std_mapping >> 32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009059 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
Matt Carlson21f581a2009-08-28 14:00:25 +00009060 ((u64) tpr->rx_std_mapping & 0xffffffff));
Joe Perches63c3a662011-04-26 08:12:10 +00009061 if (!tg3_flag(tp, 5717_PLUS))
Matt Carlson87668d32009-11-13 13:03:34 +00009062 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9063 NIC_SRAM_RX_BUFFER_DESC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009064
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009065 /* Disable the mini ring */
Joe Perches63c3a662011-04-26 08:12:10 +00009066 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009067 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9068 BDINFO_FLAGS_DISABLED);
9069
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009070 /* Program the jumbo buffer descriptor ring control
9071 * blocks on those devices that have them.
9072 */
Matt Carlsona0512942011-07-27 14:20:54 +00009073 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
Joe Perches63c3a662011-04-26 08:12:10 +00009074 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009075
Joe Perches63c3a662011-04-26 08:12:10 +00009076 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009077 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
Matt Carlson21f581a2009-08-28 14:00:25 +00009078 ((u64) tpr->rx_jmb_mapping >> 32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009079 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
Matt Carlson21f581a2009-08-28 14:00:25 +00009080 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
Matt Carlsonde9f5232011-04-05 14:22:43 +00009081 val = TG3_RX_JMB_RING_SIZE(tp) <<
9082 BDINFO_FLAGS_MAXLEN_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009083 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
Matt Carlsonde9f5232011-04-05 14:22:43 +00009084 val | BDINFO_FLAGS_USE_EXT_RECV);
Joe Perches63c3a662011-04-26 08:12:10 +00009085 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
Matt Carlson55086ad2011-12-14 11:09:59 +00009086 tg3_flag(tp, 57765_CLASS))
Matt Carlson87668d32009-11-13 13:03:34 +00009087 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9088 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009089 } else {
9090 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9091 BDINFO_FLAGS_DISABLED);
9092 }
9093
Joe Perches63c3a662011-04-26 08:12:10 +00009094 if (tg3_flag(tp, 57765_PLUS)) {
Matt Carlsonfa6b2aa2011-11-21 15:01:19 +00009095 val = TG3_RX_STD_RING_SIZE(tp);
Matt Carlson7cb32cf2010-09-30 10:34:36 +00009096 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9097 val |= (TG3_RX_STD_DMA_SZ << 2);
9098 } else
Matt Carlson04380d42010-04-12 06:58:29 +00009099 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009100 } else
Matt Carlsonde9f5232011-04-05 14:22:43 +00009101 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009102
9103 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009104
Matt Carlson411da642009-11-13 13:03:46 +00009105 tpr->rx_std_prod_idx = tp->rx_pending;
Matt Carlson66711e662009-11-13 13:03:49 +00009106 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009107
Joe Perches63c3a662011-04-26 08:12:10 +00009108 tpr->rx_jmb_prod_idx =
9109 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
Matt Carlson66711e662009-11-13 13:03:49 +00009110 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009111
Matt Carlson2d31eca2009-09-01 12:53:31 +00009112 tg3_rings_reset(tp);
9113
Linus Torvalds1da177e2005-04-16 15:20:36 -07009114 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07009115 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009116
9117 /* MTU + ethernet header + FCS + optional VLAN tag */
Matt Carlsonf7b493e2009-02-25 14:21:52 +00009118 tw32(MAC_RX_MTU_SIZE,
9119 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009120
9121 /* The slot time is changed by tg3_setup_phy if we
9122 * run at gigabit with half duplex.
9123 */
Matt Carlsonf2096f92011-04-05 14:22:48 +00009124 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9125 (6 << TX_LENGTHS_IPG_SHIFT) |
9126 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9127
9128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9129 val |= tr32(MAC_TX_LENGTHS) &
9130 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9131 TX_LENGTHS_CNT_DWN_VAL_MSK);
9132
9133 tw32(MAC_TX_LENGTHS, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009134
9135 /* Receive rules. */
9136 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9137 tw32(RCVLPC_CONFIG, 0x0181);
9138
9139 /* Calculate RDMAC_MODE setting early, we need it to determine
9140 * the RCVLPC_STATE_ENABLE mask.
9141 */
9142 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9143 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9144 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9145 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9146 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07009147
Matt Carlsondeabaac2010-11-24 08:31:50 +00009148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
Matt Carlson0339e4e2010-02-12 14:47:09 +00009149 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9150
Matt Carlson57e69832008-05-25 23:48:31 -07009151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson321d32a2008-11-21 17:22:19 -08009152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9153 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
Matt Carlsond30cdd22007-10-07 23:28:35 -07009154 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9155 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9156 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9157
Matt Carlsonc5908932011-03-09 16:58:25 +00009158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9159 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +00009160 if (tg3_flag(tp, TSO_CAPABLE) &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07009161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009162 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9163 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
Joe Perches63c3a662011-04-26 08:12:10 +00009164 !tg3_flag(tp, IS_5788)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009165 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9166 }
9167 }
9168
Joe Perches63c3a662011-04-26 08:12:10 +00009169 if (tg3_flag(tp, PCI_EXPRESS))
Michael Chan85e94ce2005-04-21 17:05:28 -07009170 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9171
Joe Perches63c3a662011-04-26 08:12:10 +00009172 if (tg3_flag(tp, HW_TSO_1) ||
9173 tg3_flag(tp, HW_TSO_2) ||
9174 tg3_flag(tp, HW_TSO_3))
Matt Carlson027455a2008-12-21 20:19:30 -08009175 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9176
Matt Carlson108a6c12011-05-19 12:12:47 +00009177 if (tg3_flag(tp, 57765_PLUS) ||
Matt Carlsone849cdc2009-11-13 13:03:38 +00009178 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Matt Carlson027455a2008-12-21 20:19:30 -08009179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9180 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009181
Matt Carlsonf2096f92011-04-05 14:22:48 +00009182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9183 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9184
Matt Carlson41a8a7e2010-09-15 08:59:53 +00009185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9186 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9187 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9188 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
Joe Perches63c3a662011-04-26 08:12:10 +00009189 tg3_flag(tp, 57765_PLUS)) {
Matt Carlson41a8a7e2010-09-15 08:59:53 +00009190 val = tr32(TG3_RDMA_RSRVCTRL_REG);
Michael Chan10ce95d2012-07-29 19:15:42 +00009191 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
Matt Carlsonb4495ed2011-01-25 15:58:47 +00009192 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9193 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9194 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9195 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9196 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9197 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
Matt Carlsonb75cc0e2010-11-24 08:31:46 +00009198 }
Matt Carlson41a8a7e2010-09-15 08:59:53 +00009199 tw32(TG3_RDMA_RSRVCTRL_REG,
9200 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9201 }
9202
Matt Carlsond78b59f2011-04-05 14:22:46 +00009203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9204 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
Matt Carlsond309a462010-09-30 10:34:31 +00009205 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9206 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9207 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9208 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9209 }
9210
Linus Torvalds1da177e2005-04-16 15:20:36 -07009211 /* Receive/send statistics. */
Joe Perches63c3a662011-04-26 08:12:10 +00009212 if (tg3_flag(tp, 5750_PLUS)) {
Michael Chan16613942006-06-29 20:15:13 -07009213 val = tr32(RCVLPC_STATS_ENABLE);
9214 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9215 tw32(RCVLPC_STATS_ENABLE, val);
9216 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
Joe Perches63c3a662011-04-26 08:12:10 +00009217 tg3_flag(tp, TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009218 val = tr32(RCVLPC_STATS_ENABLE);
9219 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9220 tw32(RCVLPC_STATS_ENABLE, val);
9221 } else {
9222 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9223 }
9224 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9225 tw32(SNDDATAI_STATSENAB, 0xffffff);
9226 tw32(SNDDATAI_STATSCTRL,
9227 (SNDDATAI_SCTRL_ENABLE |
9228 SNDDATAI_SCTRL_FASTUPD));
9229
9230 /* Setup host coalescing engine. */
9231 tw32(HOSTCC_MODE, 0);
9232 for (i = 0; i < 2000; i++) {
9233 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9234 break;
9235 udelay(10);
9236 }
9237
Michael Chand244c892005-07-05 14:42:33 -07009238 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009239
Joe Perches63c3a662011-04-26 08:12:10 +00009240 if (!tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009241 /* Status/statistics block address. See tg3_timer,
9242 * the tg3_periodic_fetch_stats call there, and
9243 * tg3_get_stats to see how this works for 5705/5750 chips.
9244 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009245 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9246 ((u64) tp->stats_mapping >> 32));
9247 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9248 ((u64) tp->stats_mapping & 0xffffffff));
9249 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
Matt Carlson2d31eca2009-09-01 12:53:31 +00009250
Linus Torvalds1da177e2005-04-16 15:20:36 -07009251 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
Matt Carlson2d31eca2009-09-01 12:53:31 +00009252
9253 /* Clear statistics and status block memory areas */
9254 for (i = NIC_SRAM_STATS_BLK;
9255 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9256 i += sizeof(u32)) {
9257 tg3_write_mem(tp, i, 0);
9258 udelay(40);
9259 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009260 }
9261
9262 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9263
9264 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9265 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
Joe Perches63c3a662011-04-26 08:12:10 +00009266 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009267 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9268
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009269 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9270 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
Michael Chanc94e3942005-09-27 12:12:42 -07009271 /* reset to prevent losing 1st rx packet intermittently */
9272 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9273 udelay(10);
9274 }
9275
Matt Carlson3bda1252008-08-15 14:08:22 -07009276 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Matt Carlson9e975cc2011-07-20 10:20:50 +00009277 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9278 MAC_MODE_FHDE_ENABLE;
9279 if (tg3_flag(tp, ENABLE_APE))
9280 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
Joe Perches63c3a662011-04-26 08:12:10 +00009281 if (!tg3_flag(tp, 5705_PLUS) &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009282 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009283 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9284 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009285 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9286 udelay(40);
9287
Michael Chan314fba32005-04-21 17:07:04 -07009288 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Joe Perches63c3a662011-04-26 08:12:10 +00009289 * If TG3_FLAG_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07009290 * register to preserve the GPIO settings for LOMs. The GPIOs,
9291 * whether used as inputs or outputs, are set by boot code after
9292 * reset.
9293 */
Joe Perches63c3a662011-04-26 08:12:10 +00009294 if (!tg3_flag(tp, IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07009295 u32 gpio_mask;
9296
Michael Chan9d26e212006-12-07 00:21:14 -08009297 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9298 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9299 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07009300
9301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9302 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9303 GRC_LCLCTRL_GPIO_OUTPUT3;
9304
Michael Chanaf36e6b2006-03-23 01:28:06 -08009305 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9306 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9307
Gary Zambranoaaf84462007-05-05 11:51:45 -07009308 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07009309 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9310
9311 /* GPIO1 must be driven high for eeprom write protect */
Joe Perches63c3a662011-04-26 08:12:10 +00009312 if (tg3_flag(tp, EEPROM_WRITE_PROT))
Michael Chan9d26e212006-12-07 00:21:14 -08009313 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9314 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07009315 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009316 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9317 udelay(100);
9318
Matt Carlsonc3b50032012-01-17 15:27:23 +00009319 if (tg3_flag(tp, USING_MSIX)) {
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009320 val = tr32(MSGINT_MODE);
Matt Carlsonc3b50032012-01-17 15:27:23 +00009321 val |= MSGINT_MODE_ENABLE;
9322 if (tp->irq_cnt > 1)
9323 val |= MSGINT_MODE_MULTIVEC_EN;
Matt Carlson5b39de92011-08-31 11:44:50 +00009324 if (!tg3_flag(tp, 1SHOT_MSI))
9325 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009326 tw32(MSGINT_MODE, val);
9327 }
9328
Joe Perches63c3a662011-04-26 08:12:10 +00009329 if (!tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009330 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9331 udelay(40);
9332 }
9333
9334 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9335 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9336 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9337 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9338 WDMAC_MODE_LNGREAD_ENAB);
9339
Matt Carlsonc5908932011-03-09 16:58:25 +00009340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9341 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +00009342 if (tg3_flag(tp, TSO_CAPABLE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07009343 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9344 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9345 /* nothing */
9346 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
Joe Perches63c3a662011-04-26 08:12:10 +00009347 !tg3_flag(tp, IS_5788)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009348 val |= WDMAC_MODE_RX_ACCEL;
9349 }
9350 }
9351
Michael Chand9ab5ad12006-03-20 22:27:35 -08009352 /* Enable host coalescing bug fix */
Joe Perches63c3a662011-04-26 08:12:10 +00009353 if (tg3_flag(tp, 5755_PLUS))
Matt Carlsonf51f3562008-05-25 23:45:08 -07009354 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad12006-03-20 22:27:35 -08009355
Matt Carlson788a0352009-11-02 14:26:03 +00009356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9357 val |= WDMAC_MODE_BURST_ALL_DATA;
9358
Linus Torvalds1da177e2005-04-16 15:20:36 -07009359 tw32_f(WDMAC_MODE, val);
9360 udelay(40);
9361
Joe Perches63c3a662011-04-26 08:12:10 +00009362 if (tg3_flag(tp, PCIX_MODE)) {
Matt Carlson9974a352007-10-07 23:27:28 -07009363 u16 pcix_cmd;
9364
9365 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9366 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009367 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07009368 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9369 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009370 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07009371 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9372 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009373 }
Matt Carlson9974a352007-10-07 23:27:28 -07009374 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9375 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009376 }
9377
9378 tw32_f(RDMAC_MODE, rdmac_mode);
9379 udelay(40);
9380
Michael Chan091f0ea2012-07-29 19:15:43 +00009381 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9382 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9383 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9384 break;
9385 }
9386 if (i < TG3_NUM_RDMA_CHANNELS) {
9387 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9388 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9389 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9390 tg3_flag_set(tp, 5719_RDMA_BUG);
9391 }
9392 }
9393
Linus Torvalds1da177e2005-04-16 15:20:36 -07009394 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
Joe Perches63c3a662011-04-26 08:12:10 +00009395 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009396 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07009397
9398 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9399 tw32(SNDDATAC_MODE,
9400 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9401 else
9402 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9403
Linus Torvalds1da177e2005-04-16 15:20:36 -07009404 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9405 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
Matt Carlson7cb32cf2010-09-30 10:34:36 +00009406 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
Joe Perches63c3a662011-04-26 08:12:10 +00009407 if (tg3_flag(tp, LRG_PROD_RING_CAP))
Matt Carlson7cb32cf2010-09-30 10:34:36 +00009408 val |= RCVDBDI_MODE_LRG_RING_SZ;
9409 tw32(RCVDBDI_MODE, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009410 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Joe Perches63c3a662011-04-26 08:12:10 +00009411 if (tg3_flag(tp, HW_TSO_1) ||
9412 tg3_flag(tp, HW_TSO_2) ||
9413 tg3_flag(tp, HW_TSO_3))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009414 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009415 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
Joe Perches63c3a662011-04-26 08:12:10 +00009416 if (tg3_flag(tp, ENABLE_TSS))
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009417 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9418 tw32(SNDBDI_MODE, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009419 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9420
9421 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9422 err = tg3_load_5701_a0_firmware_fix(tp);
9423 if (err)
9424 return err;
9425 }
9426
Joe Perches63c3a662011-04-26 08:12:10 +00009427 if (tg3_flag(tp, TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009428 err = tg3_load_tso_firmware(tp);
9429 if (err)
9430 return err;
9431 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009432
9433 tp->tx_mode = TX_MODE_ENABLE;
Matt Carlsonf2096f92011-04-05 14:22:48 +00009434
Joe Perches63c3a662011-04-26 08:12:10 +00009435 if (tg3_flag(tp, 5755_PLUS) ||
Matt Carlsonb1d05212010-06-05 17:24:31 +00009436 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9437 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
Matt Carlsonf2096f92011-04-05 14:22:48 +00009438
9439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9440 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9441 tp->tx_mode &= ~val;
9442 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9443 }
9444
Linus Torvalds1da177e2005-04-16 15:20:36 -07009445 tw32_f(MAC_TX_MODE, tp->tx_mode);
9446 udelay(100);
9447
Joe Perches63c3a662011-04-26 08:12:10 +00009448 if (tg3_flag(tp, ENABLE_RSS)) {
Matt Carlsonbcebcc42011-12-14 11:10:01 +00009449 tg3_rss_write_indir_tbl(tp);
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009450
9451 /* Setup the "secret" hash key. */
9452 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9453 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9454 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9455 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9456 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9457 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9458 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9459 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9460 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9461 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9462 }
9463
Linus Torvalds1da177e2005-04-16 15:20:36 -07009464 tp->rx_mode = RX_MODE_ENABLE;
Joe Perches63c3a662011-04-26 08:12:10 +00009465 if (tg3_flag(tp, 5755_PLUS))
Michael Chanaf36e6b2006-03-23 01:28:06 -08009466 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9467
Joe Perches63c3a662011-04-26 08:12:10 +00009468 if (tg3_flag(tp, ENABLE_RSS))
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009469 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9470 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9471 RX_MODE_RSS_IPV6_HASH_EN |
9472 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9473 RX_MODE_RSS_IPV4_HASH_EN |
9474 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9475
Linus Torvalds1da177e2005-04-16 15:20:36 -07009476 tw32_f(MAC_RX_MODE, tp->rx_mode);
9477 udelay(10);
9478
Linus Torvalds1da177e2005-04-16 15:20:36 -07009479 tw32(MAC_LED_CTRL, tp->led_ctrl);
9480
9481 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009482 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009483 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9484 udelay(10);
9485 }
9486 tw32_f(MAC_RX_MODE, tp->rx_mode);
9487 udelay(10);
9488
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009489 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009490 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009491 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009492 /* Set drive transmission level to 1.2V */
9493 /* only if the signal pre-emphasis bit is not set */
9494 val = tr32(MAC_SERDES_CFG);
9495 val &= 0xfffff000;
9496 val |= 0x880;
9497 tw32(MAC_SERDES_CFG, val);
9498 }
9499 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9500 tw32(MAC_SERDES_CFG, 0x616000);
9501 }
9502
9503 /* Prevent chip from dropping frames when flow control
9504 * is enabled.
9505 */
Matt Carlson55086ad2011-12-14 11:09:59 +00009506 if (tg3_flag(tp, 57765_CLASS))
Matt Carlson666bc832010-01-20 16:58:03 +00009507 val = 1;
9508 else
9509 val = 2;
9510 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009511
9512 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009513 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009514 /* Use hardware link auto-negotiation */
Joe Perches63c3a662011-04-26 08:12:10 +00009515 tg3_flag_set(tp, HW_AUTONEG);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009516 }
9517
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009518 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
Matt Carlson6ff6f812011-05-19 12:12:54 +00009519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
Michael Chand4d2c552006-03-20 17:47:20 -08009520 u32 tmp;
9521
9522 tmp = tr32(SERDES_RX_CTRL);
9523 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9524 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9525 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9526 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9527 }
9528
Joe Perches63c3a662011-04-26 08:12:10 +00009529 if (!tg3_flag(tp, USE_PHYLIB)) {
Matt Carlsonc6700ce2012-02-13 15:20:15 +00009530 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
Matt Carlson80096062010-08-02 11:26:06 +00009531 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009532
Matt Carlsondd477002008-05-25 23:45:58 -07009533 err = tg3_setup_phy(tp, 0);
9534 if (err)
9535 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009536
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009537 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9538 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
Matt Carlsondd477002008-05-25 23:45:58 -07009539 u32 tmp;
9540
9541 /* Clear CRC stats. */
9542 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9543 tg3_writephy(tp, MII_TG3_TEST1,
9544 tmp | MII_TG3_TEST1_CRC_EN);
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00009545 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
Matt Carlsondd477002008-05-25 23:45:58 -07009546 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009547 }
9548 }
9549
9550 __tg3_set_rx_mode(tp->dev);
9551
9552 /* Initialize receive rules. */
9553 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9554 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9555 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9556 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9557
Joe Perches63c3a662011-04-26 08:12:10 +00009558 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009559 limit = 8;
9560 else
9561 limit = 16;
Joe Perches63c3a662011-04-26 08:12:10 +00009562 if (tg3_flag(tp, ENABLE_ASF))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009563 limit -= 4;
9564 switch (limit) {
9565 case 16:
9566 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9567 case 15:
9568 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9569 case 14:
9570 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9571 case 13:
9572 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9573 case 12:
9574 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9575 case 11:
9576 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9577 case 10:
9578 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9579 case 9:
9580 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9581 case 8:
9582 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9583 case 7:
9584 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9585 case 6:
9586 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9587 case 5:
9588 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9589 case 4:
9590 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9591 case 3:
9592 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9593 case 2:
9594 case 1:
9595
9596 default:
9597 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07009598 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009599
Joe Perches63c3a662011-04-26 08:12:10 +00009600 if (tg3_flag(tp, ENABLE_APE))
Matt Carlson9ce768e2007-10-11 19:49:11 -07009601 /* Write our heartbeat update interval to APE. */
9602 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9603 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07009604
Linus Torvalds1da177e2005-04-16 15:20:36 -07009605 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9606
Linus Torvalds1da177e2005-04-16 15:20:36 -07009607 return 0;
9608}
9609
9610/* Called at device open time to get the chip ready for
9611 * packet processing. Invoked with tp->lock held.
9612 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07009613static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009614{
Linus Torvalds1da177e2005-04-16 15:20:36 -07009615 tg3_switch_clocks(tp);
9616
9617 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9618
Matt Carlson2f751b62008-08-04 23:17:34 -07009619 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009620}
9621
Michael Chanaed93e02012-07-16 16:24:02 +00009622static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9623{
9624 int i;
9625
9626 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9627 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9628
9629 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9630 off += len;
9631
9632 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9633 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9634 memset(ocir, 0, TG3_OCIR_LEN);
9635 }
9636}
9637
9638/* sysfs attributes for hwmon */
9639static ssize_t tg3_show_temp(struct device *dev,
9640 struct device_attribute *devattr, char *buf)
9641{
9642 struct pci_dev *pdev = to_pci_dev(dev);
9643 struct net_device *netdev = pci_get_drvdata(pdev);
9644 struct tg3 *tp = netdev_priv(netdev);
9645 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9646 u32 temperature;
9647
9648 spin_lock_bh(&tp->lock);
9649 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9650 sizeof(temperature));
9651 spin_unlock_bh(&tp->lock);
9652 return sprintf(buf, "%u\n", temperature);
9653}
9654
9655
/* One sysfs file per temperature record; the SENSOR_DEVICE_ATTR index
 * is the APE scratchpad byte offset that tg3_show_temp() passes to
 * tg3_ape_scratchpad_read().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* Attribute group created by tg3_hwmon_open() and removed by
 * tg3_hwmon_close().
 */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
9673
Michael Chanaed93e02012-07-16 16:24:02 +00009674static void tg3_hwmon_close(struct tg3 *tp)
9675{
Michael Chanaed93e02012-07-16 16:24:02 +00009676 if (tp->hwmon_dev) {
9677 hwmon_device_unregister(tp->hwmon_dev);
9678 tp->hwmon_dev = NULL;
9679 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9680 }
Michael Chanaed93e02012-07-16 16:24:02 +00009681}
9682
9683static void tg3_hwmon_open(struct tg3 *tp)
9684{
Michael Chanaed93e02012-07-16 16:24:02 +00009685 int i, err;
9686 u32 size = 0;
9687 struct pci_dev *pdev = tp->pdev;
9688 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9689
9690 tg3_sd_scan_scratchpad(tp, ocirs);
9691
9692 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9693 if (!ocirs[i].src_data_length)
9694 continue;
9695
9696 size += ocirs[i].src_hdr_length;
9697 size += ocirs[i].src_data_length;
9698 }
9699
9700 if (!size)
9701 return;
9702
9703 /* Register hwmon sysfs hooks */
9704 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9705 if (err) {
9706 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9707 return;
9708 }
9709
9710 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9711 if (IS_ERR(tp->hwmon_dev)) {
9712 tp->hwmon_dev = NULL;
9713 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9714 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9715 }
Michael Chanaed93e02012-07-16 16:24:02 +00009716}
9717
9718
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * statistic PSTAT (a high/low u32 pair), carrying into .high when
 * .low wraps around.
 * NOTE(review): assumes the hardware counter clears on read so the
 * full value can be added each poll — TODO confirm against the
 * register spec.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9725
/* Fold the chip's 32-bit MAC/receive-list statistics registers into
 * the 64-bit software counters in tp->hw_stats.  Called once per
 * second from tg3_timer() on 5705+ chips (which lack a hardware
 * statistics block DMA).  Skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 workaround: once enough packets have been sent, the TX
	 * length limitation is no longer needed — undo it and clear
	 * the flag (set in tg3_reset_hw()).
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On these chips the discard counter register is not
		 * usable; approximate discards by polling (and acking)
		 * the mbuf low-water-mark attention bit, which adds at
		 * most 1 per tick.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9790
/* Detect and recover from a lost MSI interrupt.  Called periodically
 * from tg3_timer(): if a NAPI vector has pending work but its rx/tx
 * consumer indices have not advanced since the previous tick, assume
 * the MSI was missed and invoke the handler by hand via tg3_msi().
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					/* Stalled once: give it one more
					 * tick before firing.  Note this
					 * returns from the whole scan, so
					 * later vectors keep their previous
					 * snapshots until the next tick.
					 */
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		/* Vector is healthy (or just serviced): reset the stall
		 * counter and snapshot the consumer indices for the next
		 * comparison.
		 */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
9813
/* Periodic maintenance timer (fires every tp->timer_offset jiffies).
 *
 * Under tp->lock it: works around lost interrupts on non-tagged-status
 * chips, runs once-per-second link/stats housekeeping, and sends the
 * ASF heartbeat so management firmware knows the host is alive.  The
 * timer always re-arms itself before returning.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip all work (but still re-arm) while interrupts are being
	 * synchronized or a chip reset is pending.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	/* 5717 and 57765-class chips get the missed-MSI check. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status block says work is pending: force an
			 * interrupt so it gets serviced.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* If the write DMA engine stopped, schedule a full chip
		 * reset.  The lock must be dropped before scheduling.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* Delayed EEE enable after link-up (countdown set
		 * elsewhere via tp->setlpicnt).
		 */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Renegotiate if the link dropped, or if we are
			 * down but the peer shows signs of life.
			 */
			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode to reset the SerDes
				 * state machine before reconfiguring.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9940
Matt Carlson21f76382012-02-22 12:35:21 +00009941static void __devinit tg3_timer_init(struct tg3 *tp)
9942{
9943 if (tg3_flag(tp, TAGGED_STATUS) &&
9944 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9945 !tg3_flag(tp, 57765_CLASS))
9946 tp->timer_offset = HZ;
9947 else
9948 tp->timer_offset = HZ / 10;
9949
9950 BUG_ON(tp->timer_offset > HZ);
9951
9952 tp->timer_multiplier = (HZ / tp->timer_offset);
9953 tp->asf_multiplier = (HZ / tp->timer_offset) *
9954 TG3_FW_UPDATE_FREQ_SEC;
9955
9956 init_timer(&tp->timer);
9957 tp->timer.data = (unsigned long) tp;
9958 tp->timer.function = tg3_timer;
9959}
9960
9961static void tg3_timer_start(struct tg3 *tp)
9962{
9963 tp->asf_counter = tp->asf_multiplier;
9964 tp->timer_counter = tp->timer_multiplier;
9965
9966 tp->timer.expires = jiffies + tp->timer_offset;
9967 add_timer(&tp->timer);
9968}
9969
/* Deactivate the maintenance timer and wait for a concurrently running
 * handler to finish before returning (may sleep).
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
9974
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the device is halted and closed;
 * dev_close() can sleep, so tp->lock is dropped around the shutdown
 * sequence and re-taken before returning (hence the sparse
 * __releases/__acquires annotations below).
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		/* Re-enable NAPI before dev_close() so the close path
		 * finds the device in a consistent state.
		 */
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
9998
/* Deferred-work handler for chip resets scheduled via
 * tg3_reset_task_schedule() (RESET_TASK_PENDING flag).
 *
 * Stops PHY/netif activity, halts and re-initializes the hardware,
 * then restarts the interface.  Clears RESET_TASK_PENDING on exit.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Device was closed after the reset was scheduled: nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* TX recovery path: fall back to flushed mailbox writes.
	 * NOTE(review): presumably a write-reordering workaround after a
	 * TX timeout -- confirm against the TX timeout handler.
	 */
	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10042
Matt Carlson4f125f42009-09-01 12:55:02 +000010043static int tg3_request_irq(struct tg3 *tp, int irq_num)
Michael Chanfcfa0a32006-03-20 22:28:41 -080010044{
David Howells7d12e782006-10-05 14:55:46 +010010045 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010046 unsigned long flags;
Matt Carlson4f125f42009-09-01 12:55:02 +000010047 char *name;
10048 struct tg3_napi *tnapi = &tp->napi[irq_num];
10049
10050 if (tp->irq_cnt == 1)
10051 name = tp->dev->name;
10052 else {
10053 name = &tnapi->irq_lbl[0];
10054 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10055 name[IFNAMSIZ-1] = 0;
10056 }
Michael Chanfcfa0a32006-03-20 22:28:41 -080010057
Joe Perches63c3a662011-04-26 08:12:10 +000010058 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
Michael Chanfcfa0a32006-03-20 22:28:41 -080010059 fn = tg3_msi;
Joe Perches63c3a662011-04-26 08:12:10 +000010060 if (tg3_flag(tp, 1SHOT_MSI))
Michael Chanfcfa0a32006-03-20 22:28:41 -080010061 fn = tg3_msi_1shot;
Javier Martinez Canillasab392d22011-03-28 16:27:31 +000010062 flags = 0;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010063 } else {
10064 fn = tg3_interrupt;
Joe Perches63c3a662011-04-26 08:12:10 +000010065 if (tg3_flag(tp, TAGGED_STATUS))
Michael Chanfcfa0a32006-03-20 22:28:41 -080010066 fn = tg3_interrupt_tagged;
Javier Martinez Canillasab392d22011-03-28 16:27:31 +000010067 flags = IRQF_SHARED;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010068 }
Matt Carlson4f125f42009-09-01 12:55:02 +000010069
10070 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
Michael Chanfcfa0a32006-03-20 22:28:41 -080010071}
10072
/* Verify that the chip can actually deliver an interrupt in the current
 * mode.  Temporarily swaps in tg3_test_isr on vector 0, forces the host
 * coalescing engine to raise an interrupt, and polls for evidence of
 * delivery.  Restores the normal handler before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, -ENODEV if the
 * device is not running, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so it asserts an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to ~50ms for the interrupt to show up either in the
	 * mailbox or as a masked PCI interrupt.
	 */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Acknowledge the last tag so the chip can raise the
		 * next interrupt.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Reinstall the regular handler regardless of the test outcome. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10146
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other error code means the device is in
 * an indeterminate interrupt state.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	/* Nothing to test unless we are actually in MSI mode. */
	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (with SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10207
/* Load the firmware image named by tp->fw_needed into tp->fw and
 * validate its declared length.  On success tp->fw_needed is cleared;
 * on failure tp->fw is released/NULL.
 *
 * Returns 0, -ENOENT (image not found) or -EINVAL (bogus length).
 */
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	/* Word 2 of the header is the full load length (includes bss),
	 * so it must cover at least the file payload past the 12-byte
	 * header.
	 */
	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
10238
Michael Chan91024262012-09-28 07:12:38 +000010239static u32 tg3_irq_count(struct tg3 *tp)
Matt Carlson679563f2009-09-01 12:55:46 +000010240{
Michael Chan91024262012-09-28 07:12:38 +000010241 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
Matt Carlson679563f2009-09-01 12:55:46 +000010242
Michael Chan91024262012-09-28 07:12:38 +000010243 if (irq_cnt > 1) {
Matt Carlsonc3b50032012-01-17 15:27:23 +000010244 /* We want as many rx rings enabled as there are cpus.
10245 * In multiqueue MSI-X mode, the first MSI-X vector
10246 * only deals with link interrupts, etc, so we add
10247 * one to the number of vectors we are requesting.
10248 */
Michael Chan91024262012-09-28 07:12:38 +000010249 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
Matt Carlsonc3b50032012-01-17 15:27:23 +000010250 }
Matt Carlson679563f2009-09-01 12:55:46 +000010251
Michael Chan91024262012-09-28 07:12:38 +000010252 return irq_cnt;
10253}
10254
/* Try to switch the device into MSI-X mode.
 *
 * Sizes the rx/tx queue counts, requests that many vectors (retrying
 * once with however many the PCI core says are available), records the
 * assigned vectors in the per-NAPI contexts, and enables RSS/TSS flags
 * when more than one vector was granted.
 *
 * Returns true if MSI-X is enabled, false to fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Positive rc = number of vectors actually available;
		 * retry with that reduced count.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		/* Vector 0 is reserved for link events, so rx rings get
		 * the remainder (at least one).
		 */
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* Single vector: MSI-X is on, but no multiqueue features. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10315
/* Pick the interrupt mode for the device: MSI-X, then MSI, then legacy
 * INTx, in order of preference and hardware support.  Programs the
 * chip's MSGINT_MODE register accordingly and collapses to a single
 * queue when only one vector is available.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* One-shot mode stays disabled unless the chip supports it. */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI and INTx both use the single PCI-assigned irq. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10354
10355static void tg3_ints_fini(struct tg3 *tp)
10356{
Joe Perches63c3a662011-04-26 08:12:10 +000010357 if (tg3_flag(tp, USING_MSIX))
Matt Carlson679563f2009-09-01 12:55:46 +000010358 pci_disable_msix(tp->pdev);
Joe Perches63c3a662011-04-26 08:12:10 +000010359 else if (tg3_flag(tp, USING_MSI))
Matt Carlson679563f2009-09-01 12:55:46 +000010360 pci_disable_msi(tp->pdev);
Joe Perches63c3a662011-04-26 08:12:10 +000010361 tg3_flag_clear(tp, USING_MSI);
10362 tg3_flag_clear(tp, USING_MSIX);
10363 tg3_flag_clear(tp, ENABLE_RSS);
10364 tg3_flag_clear(tp, ENABLE_TSS);
Matt Carlson07b01732009-08-28 14:01:15 +000010365}
10366
/* Bring the device fully up: interrupt vectors, DMA memory, NAPI, irq
 * handlers, hardware init, optional MSI self-test, PHY, hwmon, timer,
 * and finally the tx queues.
 *
 * @reset_phy: passed through to tg3_init_hw().
 * @test_irq:  run tg3_test_msi() when in MSI mode (open path does;
 *             internal restarts may skip it).
 *
 * Returns 0 or a negative errno; on failure everything acquired so far
 * is unwound via the err_out labels (in reverse acquisition order).
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	/* Request one irq per vector; on failure free the ones already
	 * acquired before unwinding.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		/* Pre-57765 chips need one-shot MSI enabled through the
		 * PCIe transaction config register.
		 */
		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
10475
/* Tear down everything tg3_start() set up, in reverse order: cancel
 * any pending reset work, quiesce netif/timer/hwmon/PHY, halt the
 * chip under the full lock, then release irqs, vectors, NAPI contexts
 * and DMA memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	/* Make sure a concurrent tg3_reset_task() cannot race with us. */
	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
10510
/* ndo_open handler: load firmware if this chip needs it, power the
 * device up, and run the full start sequence (with PHY reset and MSI
 * self-test).  On start failure, power back down before returning.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run at all without its firmware. */
			if (err)
				return err;
		} else if (err) {
			/* Other chips just lose TSO when firmware is
			 * unavailable.
			 */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true);
	if (err) {
		/* Start failed: drop auxiliary power and put the device
		 * back to sleep.
		 */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}
	return err;
}
10550
/* ndo_stop handler: stop the device, reset the cumulative stats
 * baselines, and power it down.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
10567
10568static inline u64 get_stat64(tg3_stat64_t *val)
10569{
10570 return ((u64)val->high << 32) | ((u64)val->low);
10571}
10572
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper parts the count is read from a PHY test register
 * and accumulated in software (tp->phy_crc_errors); everywhere else the
 * MAC's rx_fcs_errors hardware counter is used directly.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then read (and
			 * thereby clear-on-read — presumably; TODO confirm
			 * against the PHY datasheet) the error count.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
10596
/* Accumulate one ethtool statistic: previous snapshot (old_estats) plus
 * the live hardware counter.  Relies on "estats", "old_estats" and
 * "hw_stats" being in scope at the expansion site (see tg3_get_estats()).
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
10600
/* Fill @estats for ethtool -S.
 *
 * Every field is the snapshot saved across close/open (tp->estats_prev)
 * plus the current hardware counter, combined by ESTAT_ADD().  The
 * member list must stay in sync with struct tg3_ethtool_stats.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
10684
/* Fill @stats (rtnl_link_stats64) from the hardware statistics block.
 *
 * Each field is the snapshot preserved across close/open
 * (tp->net_stats_prev) plus the live hardware counters; rx_dropped and
 * tx_dropped come from software counters maintained by the driver.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 — see
	 * tg3_calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
10740
Linus Torvalds1da177e2005-04-16 15:20:36 -070010741static int tg3_get_regs_len(struct net_device *dev)
10742{
Matt Carlson97bd8e42011-04-13 11:05:04 +000010743 return TG3_REG_BLK_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010744}
10745
/* ethtool get_regs callback: dump the legacy register block into @_p.
 *
 * The buffer (TG3_REG_BLK_SIZE bytes, per tg3_get_regs_len()) is
 * zeroed first; if the PHY is in low-power mode no registers are read
 * and the caller sees an all-zero dump.
 */
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	/* Don't touch the chip while it is powered down. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	/* Full lock: keep the register state stable during the dump. */
	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
10764
10765static int tg3_get_eeprom_len(struct net_device *dev)
10766{
10767 struct tg3 *tp = netdev_priv(dev);
10768
10769 return tp->nvram_size;
10770}
10771
/* ethtool get_eeprom callback: copy @eeprom->len bytes of NVRAM,
 * starting at @eeprom->offset, into @data.
 *
 * NVRAM is only readable in aligned 32-bit words, so the request is
 * split into an unaligned head, a run of whole words, and an unaligned
 * tail.  eeprom->len is updated to the number of bytes actually copied
 * so a partial result is reported correctly on read failure.
 * Returns 0 or a negative errno (-EINVAL without NVRAM, -EAGAIN while
 * the PHY is in low-power mode).
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Copy only the requested bytes out of the aligned word. */
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
10834
/* ethtool set_eeprom callback: write @eeprom->len bytes from @data to
 * NVRAM at @eeprom->offset.
 *
 * NVRAM writes must be 32-bit aligned, so unaligned head/tail bytes are
 * handled by a read-modify-write through a temporary kmalloc'd bounce
 * buffer that preserves the neighbouring bytes.  Returns 0 or a
 * negative errno (-EAGAIN in low-power mode, -EINVAL without NVRAM or
 * on a bad magic, -ENOMEM if the bounce buffer allocation fails).
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		/* Fetch the word containing the unaligned head so the
		 * bytes before "offset" are preserved.
		 */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		/* Fetch the final word so the bytes after the write
		 * range are preserved.
		 */
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Assemble head word + user data + tail word in a
		 * bounce buffer for the aligned block write.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
10893
/* ethtool get_settings callback.
 *
 * When phylib drives the PHY, the query is delegated to
 * phy_ethtool_gset().  Otherwise the supported/advertised masks are
 * built from the PHY capability flags, and speed/duplex/MDI-X are
 * reported from link_config when the interface is running with link
 * up, or as UNKNOWN/INVALID when it is not.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		/* Copper PHY: 10/100 rates over twisted pair. */
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the rx/tx flow-control configuration onto the
		 * Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* No link (or interface down): nothing meaningful to
		 * report for the active parameters.
		 */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010959
/* ethtool set_settings callback.
 *
 * Delegates to phy_ethtool_sset() when phylib drives the PHY.
 * Otherwise validates the requested autoneg/speed/duplex/advertising
 * combination against the PHY capability flags, then updates
 * link_config under the full lock and renegotiates via tg3_setup_phy()
 * if the interface is running.  Returns 0 or -EINVAL/-EAGAIN.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of modes this device can advertise and
		 * reject any request outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits for link_config. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SerDes links only run 1000/full when forced. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011046
Linus Torvalds1da177e2005-04-16 15:20:36 -070011047static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11048{
11049 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011050
Rick Jones68aad782011-11-07 13:29:27 +000011051 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11052 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11053 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11054 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011055}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011056
Linus Torvalds1da177e2005-04-16 15:20:36 -070011057static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11058{
11059 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011060
Joe Perches63c3a662011-04-26 08:12:10 +000011061 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -070011062 wol->supported = WAKE_MAGIC;
11063 else
11064 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011065 wol->wolopts = 0;
Joe Perches63c3a662011-04-26 08:12:10 +000011066 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011067 wol->wolopts = WAKE_MAGIC;
11068 memset(&wol->sopass, 0, sizeof(wol->sopass));
11069}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011070
/* ethtool set_wol callback: enable/disable magic-packet Wake-on-LAN.
 *
 * Rejects any wake mode other than WAKE_MAGIC, and WAKE_MAGIC itself
 * when the chip or platform cannot wake.  The PM core's wakeup state
 * is updated first and the driver flag is derived from it under
 * tp->lock so the two stay consistent.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011093
Linus Torvalds1da177e2005-04-16 15:20:36 -070011094static u32 tg3_get_msglevel(struct net_device *dev)
11095{
11096 struct tg3 *tp = netdev_priv(dev);
11097 return tp->msg_enable;
11098}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011099
Linus Torvalds1da177e2005-04-16 15:20:36 -070011100static void tg3_set_msglevel(struct net_device *dev, u32 value)
11101{
11102 struct tg3 *tp = netdev_priv(dev);
11103 tp->msg_enable = value;
11104}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011105
/* ethtool nway_reset callback: restart autonegotiation.
 *
 * Delegated to phy_start_aneg() under phylib; otherwise BMCR is read
 * and autoneg is restarted by hand under tp->lock.  Returns 0 on
 * success, -EAGAIN if the interface is down or the phylib PHY is not
 * connected, -EINVAL on internal-SerDes PHYs or when autoneg is not
 * enabled (and parallel detect is not active).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* BMCR is deliberately read twice; the first read is
		 * discarded — presumably a dummy read the PHY needs
		 * before the value is stable.  TODO(review): confirm
		 * against the PHY errata before changing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011139
Linus Torvalds1da177e2005-04-16 15:20:36 -070011140static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11141{
11142 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011143
Matt Carlson2c49a442010-09-30 10:34:35 +000011144 ering->rx_max_pending = tp->rx_std_ring_mask;
Joe Perches63c3a662011-04-26 08:12:10 +000011145 if (tg3_flag(tp, JUMBO_RING_ENABLE))
Matt Carlson2c49a442010-09-30 10:34:35 +000011146 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
Michael Chan4f81c322006-03-20 21:33:42 -080011147 else
11148 ering->rx_jumbo_max_pending = 0;
11149
11150 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011151
11152 ering->rx_pending = tp->rx_pending;
Joe Perches63c3a662011-04-26 08:12:10 +000011153 if (tg3_flag(tp, JUMBO_RING_ENABLE))
Michael Chan4f81c322006-03-20 21:33:42 -080011154 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11155 else
11156 ering->rx_jumbo_pending = 0;
11157
Matt Carlsonf3f3f272009-08-28 14:03:21 +000011158 ering->tx_pending = tp->napi[0].tx_pending;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011159}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011160
/* ethtool set_ringparam callback: resize the RX/TX rings.
 *
 * Validates the requested sizes against the hardware limits (the TX
 * ring must also leave room for a maximally-fragmented skb, tripled on
 * TSO_BUG chips), then stops the interface if running, applies the new
 * sizes to all NAPI contexts under the full lock, and restarts the
 * hardware.  Returns 0 or a negative errno from tg3_restart_hw().
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		/* tg3_full_lock() will synchronize irqs below. */
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Halt and re-init so the hardware picks up the sizes. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011206
Linus Torvalds1da177e2005-04-16 15:20:36 -070011207static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11208{
11209 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011210
Joe Perches63c3a662011-04-26 08:12:10 +000011211 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
Matt Carlson8d018622007-12-20 20:05:44 -080011212
Matt Carlson4a2db502011-12-08 14:40:17 +000011213 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
Matt Carlson8d018622007-12-20 20:05:44 -080011214 epause->rx_pause = 1;
11215 else
11216 epause->rx_pause = 0;
11217
Matt Carlson4a2db502011-12-08 14:40:17 +000011218 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
Matt Carlson8d018622007-12-20 20:05:44 -080011219 epause->tx_pause = 1;
11220 else
11221 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011222}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011223
/* ethtool set_pauseparam: apply a new flow control configuration.
 *
 * Two paths exist.  With phylib (USE_PHYLIB) the requested settings are
 * translated into Pause/Asym_Pause advertisement bits and, when
 * autonegotiating, the link is renegotiated so the partner learns the new
 * settings.  Without phylib the device is stopped, the cached flowctrl
 * bits are updated under the full lock, and the hardware is restarted.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Reject asymmetric requests the PHY cannot advertise. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Rebuild flowctrl from scratch and derive the matching
		 * Pause/Asym_Pause advertisement combination.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			/* Forced mode: program flow control immediately. */
			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet: stash the advertisement for
			 * when the link is brought up.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		/* Quiesce NAPI/IRQs before taking the full lock if the
		 * interface is up.
		 */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* A full chip reset is required for the new settings to
		 * take effect.
		 */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011325
Matt Carlsonde6f31e2010-04-12 06:58:30 +000011326static int tg3_get_sset_count(struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011327{
Jeff Garzikb9f2c042007-10-03 18:07:32 -070011328 switch (sset) {
11329 case ETH_SS_TEST:
11330 return TG3_NUM_TEST;
11331 case ETH_SS_STATS:
11332 return TG3_NUM_STATS;
11333 default:
11334 return -EOPNOTSUPP;
11335 }
Michael Chan4cafd3f2005-05-29 14:56:34 -070011336}
11337
Matt Carlson90415472011-12-16 13:33:23 +000011338static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11339 u32 *rules __always_unused)
11340{
11341 struct tg3 *tp = netdev_priv(dev);
11342
11343 if (!tg3_flag(tp, SUPPORT_MSIX))
11344 return -EOPNOTSUPP;
11345
11346 switch (info->cmd) {
11347 case ETHTOOL_GRXRINGS:
11348 if (netif_running(tp->dev))
Michael Chan91024262012-09-28 07:12:38 +000011349 info->data = tp->rxq_cnt;
Matt Carlson90415472011-12-16 13:33:23 +000011350 else {
11351 info->data = num_online_cpus();
Michael Chan91024262012-09-28 07:12:38 +000011352 if (info->data > TG3_RSS_MAX_NUM_QS)
11353 info->data = TG3_RSS_MAX_NUM_QS;
Matt Carlson90415472011-12-16 13:33:23 +000011354 }
11355
11356 /* The first interrupt vector only
11357 * handles link interrupts.
11358 */
11359 info->data -= 1;
11360 return 0;
11361
11362 default:
11363 return -EOPNOTSUPP;
11364 }
11365}
11366
11367static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11368{
11369 u32 size = 0;
11370 struct tg3 *tp = netdev_priv(dev);
11371
11372 if (tg3_flag(tp, SUPPORT_MSIX))
11373 size = TG3_RSS_INDIR_TBL_SIZE;
11374
11375 return size;
11376}
11377
11378static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11379{
11380 struct tg3 *tp = netdev_priv(dev);
11381 int i;
11382
11383 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11384 indir[i] = tp->rss_ind_tbl[i];
11385
11386 return 0;
11387}
11388
/* ethtool set_rxfh_indir: install a new RSS indirection table.
 *
 * The table is always cached in software; it is only pushed to the
 * hardware when the interface is up and RSS is enabled.
 */
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
11409
Michael Chan09681692012-09-28 07:12:42 +000011410static void tg3_get_channels(struct net_device *dev,
11411 struct ethtool_channels *channel)
11412{
11413 struct tg3 *tp = netdev_priv(dev);
11414 u32 deflt_qs = netif_get_num_default_rss_queues();
11415
11416 channel->max_rx = tp->rxq_max;
11417 channel->max_tx = tp->txq_max;
11418
11419 if (netif_running(dev)) {
11420 channel->rx_count = tp->rxq_cnt;
11421 channel->tx_count = tp->txq_cnt;
11422 } else {
11423 if (tp->rxq_req)
11424 channel->rx_count = tp->rxq_req;
11425 else
11426 channel->rx_count = min(deflt_qs, tp->rxq_max);
11427
11428 if (tp->txq_req)
11429 channel->tx_count = tp->txq_req;
11430 else
11431 channel->tx_count = min(deflt_qs, tp->txq_max);
11432 }
11433}
11434
/* ethtool set_channels: change the requested RX/TX queue counts.
 *
 * The new counts are recorded as requests; if the interface is up the
 * device is fully stopped and restarted so the new queue configuration
 * takes effect.  Returns 0 on success or a negative errno.
 */
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	/* Reject counts beyond the hardware limits. */
	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	/* If the interface is down the request is applied at next open. */
	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	/* Restart with IRQs re-initialized; link state is re-established. */
	tg3_start(tp, true, false);

	return 0;
}
11461
Matt Carlsonde6f31e2010-04-12 06:58:30 +000011462static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011463{
11464 switch (stringset) {
11465 case ETH_SS_STATS:
11466 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11467 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -070011468 case ETH_SS_TEST:
11469 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11470 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011471 default:
11472 WARN_ON(1); /* we need a WARN() */
11473 break;
11474 }
11475}
11476
/* ethtool set_phys_id: blink the port LEDs so the NIC can be located.
 *
 * Returning 1 for ETHTOOL_ID_ACTIVE tells the ethtool core to call back
 * with ID_ON/ID_OFF once per second; ID_INACTIVE restores the saved LED
 * control value.
 */
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Force every speed/traffic LED on. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Override the LEDs to the off state. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore normal LED operation. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
11511
Matt Carlsonde6f31e2010-04-12 06:58:30 +000011512static void tg3_get_ethtool_stats(struct net_device *dev,
Linus Torvalds1da177e2005-04-16 15:20:36 -070011513 struct ethtool_stats *estats, u64 *tmp_stats)
11514{
11515 struct tg3 *tp = netdev_priv(dev);
Matt Carlson0e6c9da2011-12-08 14:40:13 +000011516
Matt Carlsonb546e462012-02-13 15:20:09 +000011517 if (tp->hw_stats)
11518 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11519 else
11520 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011521}
11522
/* Read the device's VPD block out of NVRAM (or via PCI config space).
 *
 * If the NVRAM has the standard tg3 EEPROM magic, the NVRAM directory is
 * scanned for an extended-VPD entry; otherwise (or if none is found) the
 * default VPD offset/length are used.  For non-EEPROM parts the data is
 * fetched with pci_read_vpd() instead.
 *
 * On success returns a kmalloc'd buffer (caller frees) and stores its
 * length in *vpdlen; returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Walk the NVRAM directory looking for an extended VPD
		 * entry.
		 */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Directory length is in 32-bit words. */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	/* No extended VPD entry found: fall back to the fixed location. */
	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Read via PCI VPD capability; retry up to 3 times on
		 * timeout/interrupt, treating those as zero-byte reads.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
11598
/* Sizes (in bytes) of the various NVRAM image formats we can verify. */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool NVRAM self-test: verify the checksum/parity of whatever image
 * format the NVRAM holds (standard EEPROM, selfboot firmware, or selfboot
 * hardware format), then verify the VPD block checksum.
 * Returns 0 if everything checks out, negative errno otherwise.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how much data to read based on the image format
	 * identified by the magic value.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			/* Unknown selfboot format: nothing to verify. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Selfboot firmware: a simple 8-bit sum over the image
		 * must come out to zero.
		 */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		/* Selfboot HW format: each data byte carries an odd-parity
		 * bit stored separately; check every byte's parity.
		 */
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 hold 7 parity bits each. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 holds 6 parity bits, byte 17
				 * another 8.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* Odd popcount must pair with a clear parity bit
			 * and even popcount with a set one.
			 */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD read-only section checksum. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Sum from the start of VPD through the checksum
			 * byte; the result must be zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
11787
Michael Chanca430072005-05-29 14:57:23 -070011788#define TG3_SERDES_TIMEOUT_SEC 2
11789#define TG3_COPPER_TIMEOUT_SEC 6
11790
11791static int tg3_test_link(struct tg3 *tp)
11792{
11793 int i, max;
11794
11795 if (!netif_running(tp->dev))
11796 return -ENODEV;
11797
Matt Carlsonf07e9af2010-08-02 11:26:07 +000011798 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -070011799 max = TG3_SERDES_TIMEOUT_SEC;
11800 else
11801 max = TG3_COPPER_TIMEOUT_SEC;
11802
11803 for (i = 0; i < max; i++) {
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +000011804 if (tp->link_up)
Michael Chanca430072005-05-29 14:57:23 -070011805 return 0;
11806
11807 if (msleep_interruptible(1000))
11808 break;
11809 }
11810
11811 return -EIO;
11812}
11813
/* Only test the commonly used registers */
/* ethtool register self-test: for each table entry, verify that writing
 * zero and then all-ones to the register leaves the read-only bits
 * untouched and makes the read/write bits track the written value.
 * The original register contents are restored afterwards.
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		/* Chip-applicability flags for each table entry. */
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel marking the end of the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that don't apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	/* Restore the register before reporting failure. */
	tw32(offset, save_val);
	return -EIO;
}
12034
Michael Chan7942e1d2005-05-29 14:58:36 -070012035static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12036{
Arjan van de Venf71e1302006-03-03 21:33:57 -050012037 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -070012038 int i;
12039 u32 j;
12040
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +020012041 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -070012042 for (j = 0; j < len; j += 4) {
12043 u32 val;
12044
12045 tg3_write_mem(tp, offset + j, test_pattern[i]);
12046 tg3_read_mem(tp, offset + j, &val);
12047 if (val != test_pattern[i])
12048 return -EIO;
12049 }
12050 }
12051 return 0;
12052}
12053
/* ethtool memory self-test: select the internal-memory map that matches
 * this chip family, then pattern-test each region with tg3_do_mem_test().
 * Each table ends with a { 0xffffffff, 0 } sentinel.
 * Returns 0 on success or the first region's error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the table for the most specific chip class first. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
12122
Matt Carlsonbb158d62011-04-25 12:42:47 +000012123#define TG3_TSO_MSS 500
12124
12125#define TG3_TSO_IP_HDR_LEN 20
12126#define TG3_TSO_TCP_HDR_LEN 20
12127#define TG3_TSO_TCP_OPT_LEN 12
12128
12129static const u8 tg3_tso_header[] = {
121300x08, 0x00,
121310x45, 0x00, 0x00, 0x00,
121320x00, 0x00, 0x40, 0x00,
121330x40, 0x06, 0x00, 0x00,
121340x0a, 0x00, 0x00, 0x01,
121350x0a, 0x00, 0x00, 0x02,
121360x0d, 0x00, 0xe0, 0x00,
121370x00, 0x00, 0x01, 0x00,
121380x00, 0x00, 0x02, 0x00,
121390x80, 0x10, 0x10, 0x00,
121400x14, 0x09, 0x00, 0x00,
121410x01, 0x01, 0x08, 0x0a,
121420x11, 0x11, 0x11, 0x11,
121430x11, 0x11, 0x11, 0x11,
12144};
Michael Chan9f40dea2005-09-05 17:53:06 -070012145
Matt Carlson28a45952011-08-19 13:58:22 +000012146static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
Michael Chanc76949a2005-05-29 14:58:59 -070012147{
Matt Carlson5e5a7f32011-08-19 13:58:21 +000012148 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
Matt Carlsonbb158d62011-04-25 12:42:47 +000012149 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
Matt Carlson84b67b22011-07-27 14:20:52 +000012150 u32 budget;
Eric Dumazet9205fd92011-11-18 06:47:01 +000012151 struct sk_buff *skb;
12152 u8 *tx_data, *rx_data;
Michael Chanc76949a2005-05-29 14:58:59 -070012153 dma_addr_t map;
12154 int num_pkts, tx_len, rx_len, i, err;
12155 struct tg3_rx_buffer_desc *desc;
Matt Carlson898a56f2009-08-28 14:02:40 +000012156 struct tg3_napi *tnapi, *rnapi;
Matt Carlson8fea32b2010-09-15 08:59:58 +000012157 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
Michael Chanc76949a2005-05-29 14:58:59 -070012158
Matt Carlsonc8873402010-02-12 14:47:11 +000012159 tnapi = &tp->napi[0];
12160 rnapi = &tp->napi[0];
Matt Carlson0c1d0e22009-09-01 13:16:33 +000012161 if (tp->irq_cnt > 1) {
Joe Perches63c3a662011-04-26 08:12:10 +000012162 if (tg3_flag(tp, ENABLE_RSS))
Matt Carlson1da85aa2010-09-30 10:34:34 +000012163 rnapi = &tp->napi[1];
Joe Perches63c3a662011-04-26 08:12:10 +000012164 if (tg3_flag(tp, ENABLE_TSS))
Matt Carlsonc8873402010-02-12 14:47:11 +000012165 tnapi = &tp->napi[1];
Matt Carlson0c1d0e22009-09-01 13:16:33 +000012166 }
Matt Carlsonfd2ce372009-09-01 12:51:13 +000012167 coal_now = tnapi->coal_now | rnapi->coal_now;
Matt Carlson898a56f2009-08-28 14:02:40 +000012168
Michael Chanc76949a2005-05-29 14:58:59 -070012169 err = -EIO;
12170
Matt Carlson4852a862011-04-13 11:05:07 +000012171 tx_len = pktsz;
David S. Millera20e9c62006-07-31 22:38:16 -070012172 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070012173 if (!skb)
12174 return -ENOMEM;
12175
Michael Chanc76949a2005-05-29 14:58:59 -070012176 tx_data = skb_put(skb, tx_len);
12177 memcpy(tx_data, tp->dev->dev_addr, 6);
12178 memset(tx_data + 6, 0x0, 8);
12179
Matt Carlson4852a862011-04-13 11:05:07 +000012180 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
Michael Chanc76949a2005-05-29 14:58:59 -070012181
Matt Carlson28a45952011-08-19 13:58:22 +000012182 if (tso_loopback) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012183 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12184
12185 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12186 TG3_TSO_TCP_OPT_LEN;
12187
12188 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12189 sizeof(tg3_tso_header));
12190 mss = TG3_TSO_MSS;
12191
12192 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12193 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12194
12195 /* Set the total length field in the IP header */
12196 iph->tot_len = htons((u16)(mss + hdr_len));
12197
12198 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12199 TXD_FLAG_CPU_POST_DMA);
12200
Joe Perches63c3a662011-04-26 08:12:10 +000012201 if (tg3_flag(tp, HW_TSO_1) ||
12202 tg3_flag(tp, HW_TSO_2) ||
12203 tg3_flag(tp, HW_TSO_3)) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012204 struct tcphdr *th;
12205 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12206 th = (struct tcphdr *)&tx_data[val];
12207 th->check = 0;
12208 } else
12209 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12210
Joe Perches63c3a662011-04-26 08:12:10 +000012211 if (tg3_flag(tp, HW_TSO_3)) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012212 mss |= (hdr_len & 0xc) << 12;
12213 if (hdr_len & 0x10)
12214 base_flags |= 0x00000010;
12215 base_flags |= (hdr_len & 0x3e0) << 5;
Joe Perches63c3a662011-04-26 08:12:10 +000012216 } else if (tg3_flag(tp, HW_TSO_2))
Matt Carlsonbb158d62011-04-25 12:42:47 +000012217 mss |= hdr_len << 9;
Joe Perches63c3a662011-04-26 08:12:10 +000012218 else if (tg3_flag(tp, HW_TSO_1) ||
Matt Carlsonbb158d62011-04-25 12:42:47 +000012219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12220 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12221 } else {
12222 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12223 }
12224
12225 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12226 } else {
12227 num_pkts = 1;
12228 data_off = ETH_HLEN;
Michael Chanc441b452012-03-04 14:48:13 +000012229
12230 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12231 tx_len > VLAN_ETH_FRAME_LEN)
12232 base_flags |= TXD_FLAG_JMB_PKT;
Matt Carlsonbb158d62011-04-25 12:42:47 +000012233 }
12234
12235 for (i = data_off; i < tx_len; i++)
Michael Chanc76949a2005-05-29 14:58:59 -070012236 tx_data[i] = (u8) (i & 0xff);
12237
Alexander Duyckf4188d82009-12-02 16:48:38 +000012238 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12239 if (pci_dma_mapping_error(tp->pdev, map)) {
Matt Carlsona21771d2009-11-02 14:25:31 +000012240 dev_kfree_skb(skb);
12241 return -EIO;
12242 }
Michael Chanc76949a2005-05-29 14:58:59 -070012243
Matt Carlson0d681b22011-07-27 14:20:49 +000012244 val = tnapi->tx_prod;
12245 tnapi->tx_buffers[val].skb = skb;
12246 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12247
Michael Chanc76949a2005-05-29 14:58:59 -070012248 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
Matt Carlsonfd2ce372009-09-01 12:51:13 +000012249 rnapi->coal_now);
Michael Chanc76949a2005-05-29 14:58:59 -070012250
12251 udelay(10);
12252
Matt Carlson898a56f2009-08-28 14:02:40 +000012253 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
Michael Chanc76949a2005-05-29 14:58:59 -070012254
Matt Carlson84b67b22011-07-27 14:20:52 +000012255 budget = tg3_tx_avail(tnapi);
12256 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
Matt Carlsond1a3b732011-07-27 14:20:51 +000012257 base_flags | TXD_FLAG_END, mss, 0)) {
12258 tnapi->tx_buffers[val].skb = NULL;
12259 dev_kfree_skb(skb);
12260 return -EIO;
12261 }
Michael Chanc76949a2005-05-29 14:58:59 -070012262
Matt Carlsonf3f3f272009-08-28 14:03:21 +000012263 tnapi->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070012264
Michael Chan6541b802012-03-04 14:48:14 +000012265 /* Sync BD data before updating mailbox */
12266 wmb();
12267
Matt Carlsonf3f3f272009-08-28 14:03:21 +000012268 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12269 tr32_mailbox(tnapi->prodmbox);
Michael Chanc76949a2005-05-29 14:58:59 -070012270
12271 udelay(10);
12272
Matt Carlson303fc922009-11-02 14:27:34 +000012273 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12274 for (i = 0; i < 35; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070012275 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
Matt Carlsonfd2ce372009-09-01 12:51:13 +000012276 coal_now);
Michael Chanc76949a2005-05-29 14:58:59 -070012277
12278 udelay(10);
12279
Matt Carlson898a56f2009-08-28 14:02:40 +000012280 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12281 rx_idx = rnapi->hw_status->idx[0].rx_producer;
Matt Carlsonf3f3f272009-08-28 14:03:21 +000012282 if ((tx_idx == tnapi->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070012283 (rx_idx == (rx_start_idx + num_pkts)))
12284 break;
12285 }
12286
Matt Carlsonba1142e2011-11-04 09:15:00 +000012287 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
Michael Chanc76949a2005-05-29 14:58:59 -070012288 dev_kfree_skb(skb);
12289
Matt Carlsonf3f3f272009-08-28 14:03:21 +000012290 if (tx_idx != tnapi->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070012291 goto out;
12292
12293 if (rx_idx != rx_start_idx + num_pkts)
12294 goto out;
12295
Matt Carlsonbb158d62011-04-25 12:42:47 +000012296 val = data_off;
12297 while (rx_idx != rx_start_idx) {
12298 desc = &rnapi->rx_rcb[rx_start_idx++];
12299 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12300 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
Michael Chanc76949a2005-05-29 14:58:59 -070012301
Matt Carlsonbb158d62011-04-25 12:42:47 +000012302 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12303 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
Matt Carlson4852a862011-04-13 11:05:07 +000012304 goto out;
Michael Chanc76949a2005-05-29 14:58:59 -070012305
Matt Carlsonbb158d62011-04-25 12:42:47 +000012306 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12307 - ETH_FCS_LEN;
12308
Matt Carlson28a45952011-08-19 13:58:22 +000012309 if (!tso_loopback) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012310 if (rx_len != tx_len)
12311 goto out;
12312
12313 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12314 if (opaque_key != RXD_OPAQUE_RING_STD)
12315 goto out;
12316 } else {
12317 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12318 goto out;
12319 }
12320 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12321 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
Matt Carlson54e0a672011-05-19 12:12:50 +000012322 >> RXD_TCPCSUM_SHIFT != 0xffff) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012323 goto out;
12324 }
12325
12326 if (opaque_key == RXD_OPAQUE_RING_STD) {
Eric Dumazet9205fd92011-11-18 06:47:01 +000012327 rx_data = tpr->rx_std_buffers[desc_idx].data;
Matt Carlsonbb158d62011-04-25 12:42:47 +000012328 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12329 mapping);
12330 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
Eric Dumazet9205fd92011-11-18 06:47:01 +000012331 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
Matt Carlsonbb158d62011-04-25 12:42:47 +000012332 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12333 mapping);
12334 } else
Matt Carlson4852a862011-04-13 11:05:07 +000012335 goto out;
12336
Matt Carlsonbb158d62011-04-25 12:42:47 +000012337 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12338 PCI_DMA_FROMDEVICE);
12339
Eric Dumazet9205fd92011-11-18 06:47:01 +000012340 rx_data += TG3_RX_OFFSET(tp);
Matt Carlsonbb158d62011-04-25 12:42:47 +000012341 for (i = data_off; i < rx_len; i++, val++) {
Eric Dumazet9205fd92011-11-18 06:47:01 +000012342 if (*(rx_data + i) != (u8) (val & 0xff))
Matt Carlsonbb158d62011-04-25 12:42:47 +000012343 goto out;
12344 }
Matt Carlson4852a862011-04-13 11:05:07 +000012345 }
12346
Michael Chanc76949a2005-05-29 14:58:59 -070012347 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012348
Eric Dumazet9205fd92011-11-18 06:47:01 +000012349 /* tg3_free_rings will unmap and free the rx_data */
Michael Chanc76949a2005-05-29 14:58:59 -070012350out:
12351 return err;
12352}
12353
Matt Carlson00c266b2011-04-25 12:42:46 +000012354#define TG3_STD_LOOPBACK_FAILED 1
12355#define TG3_JMB_LOOPBACK_FAILED 2
Matt Carlsonbb158d62011-04-25 12:42:47 +000012356#define TG3_TSO_LOOPBACK_FAILED 4
Matt Carlson28a45952011-08-19 13:58:22 +000012357#define TG3_LOOPBACK_FAILED \
12358 (TG3_STD_LOOPBACK_FAILED | \
12359 TG3_JMB_LOOPBACK_FAILED | \
12360 TG3_TSO_LOOPBACK_FAILED)
Matt Carlson00c266b2011-04-25 12:42:46 +000012361
Matt Carlson941ec902011-08-19 13:58:23 +000012362static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
Michael Chan9f40dea2005-09-05 17:53:06 -070012363{
Matt Carlson28a45952011-08-19 13:58:22 +000012364 int err = -EIO;
Matt Carlson2215e242011-08-19 13:58:19 +000012365 u32 eee_cap;
Michael Chanc441b452012-03-04 14:48:13 +000012366 u32 jmb_pkt_sz = 9000;
12367
12368 if (tp->dma_limit)
12369 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
Michael Chan9f40dea2005-09-05 17:53:06 -070012370
Matt Carlsonab789042011-01-25 15:58:54 +000012371 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12372 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12373
Matt Carlson28a45952011-08-19 13:58:22 +000012374 if (!netif_running(tp->dev)) {
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012375 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12376 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
Matt Carlson941ec902011-08-19 13:58:23 +000012377 if (do_extlpbk)
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012378 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
Matt Carlson28a45952011-08-19 13:58:22 +000012379 goto done;
12380 }
12381
Michael Chanb9ec6c12006-07-25 16:37:27 -070012382 err = tg3_reset_hw(tp, 1);
Matt Carlsonab789042011-01-25 15:58:54 +000012383 if (err) {
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012384 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12385 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
Matt Carlson941ec902011-08-19 13:58:23 +000012386 if (do_extlpbk)
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012387 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
Matt Carlsonab789042011-01-25 15:58:54 +000012388 goto done;
12389 }
Michael Chan9f40dea2005-09-05 17:53:06 -070012390
Joe Perches63c3a662011-04-26 08:12:10 +000012391 if (tg3_flag(tp, ENABLE_RSS)) {
Matt Carlson4a85f092011-04-20 07:57:37 +000012392 int i;
12393
12394 /* Reroute all rx packets to the 1st queue */
12395 for (i = MAC_RSS_INDIR_TBL_0;
12396 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12397 tw32(i, 0x0);
12398 }
12399
Matt Carlson6e01b202011-08-19 13:58:20 +000012400 /* HW errata - mac loopback fails in some cases on 5780.
12401 * Normal traffic and PHY loopback are not affected by
12402 * errata. Also, the MAC loopback test is deprecated for
12403 * all newer ASIC revisions.
12404 */
12405 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12406 !tg3_flag(tp, CPMU_PRESENT)) {
12407 tg3_mac_loopback(tp, true);
Matt Carlson9936bcf2007-10-10 18:03:07 -070012408
Matt Carlson28a45952011-08-19 13:58:22 +000012409 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012410 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
Matt Carlson6e01b202011-08-19 13:58:20 +000012411
12412 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
Michael Chanc441b452012-03-04 14:48:13 +000012413 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012414 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
Matt Carlson6e01b202011-08-19 13:58:20 +000012415
12416 tg3_mac_loopback(tp, false);
12417 }
Matt Carlson4852a862011-04-13 11:05:07 +000012418
Matt Carlsonf07e9af2010-08-02 11:26:07 +000012419 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
Joe Perches63c3a662011-04-26 08:12:10 +000012420 !tg3_flag(tp, USE_PHYLIB)) {
Matt Carlson5e5a7f32011-08-19 13:58:21 +000012421 int i;
12422
Matt Carlson941ec902011-08-19 13:58:23 +000012423 tg3_phy_lpbk_set(tp, 0, false);
Matt Carlson5e5a7f32011-08-19 13:58:21 +000012424
12425 /* Wait for link */
12426 for (i = 0; i < 100; i++) {
12427 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12428 break;
12429 mdelay(1);
12430 }
12431
Matt Carlson28a45952011-08-19 13:58:22 +000012432 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012433 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
Joe Perches63c3a662011-04-26 08:12:10 +000012434 if (tg3_flag(tp, TSO_CAPABLE) &&
Matt Carlson28a45952011-08-19 13:58:22 +000012435 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012436 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
Joe Perches63c3a662011-04-26 08:12:10 +000012437 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
Michael Chanc441b452012-03-04 14:48:13 +000012438 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012439 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070012440
Matt Carlson941ec902011-08-19 13:58:23 +000012441 if (do_extlpbk) {
12442 tg3_phy_lpbk_set(tp, 0, true);
12443
12444 /* All link indications report up, but the hardware
12445 * isn't really ready for about 20 msec. Double it
12446 * to be sure.
12447 */
12448 mdelay(40);
12449
12450 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012451 data[TG3_EXT_LOOPB_TEST] |=
12452 TG3_STD_LOOPBACK_FAILED;
Matt Carlson941ec902011-08-19 13:58:23 +000012453 if (tg3_flag(tp, TSO_CAPABLE) &&
12454 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012455 data[TG3_EXT_LOOPB_TEST] |=
12456 TG3_TSO_LOOPBACK_FAILED;
Matt Carlson941ec902011-08-19 13:58:23 +000012457 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
Michael Chanc441b452012-03-04 14:48:13 +000012458 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012459 data[TG3_EXT_LOOPB_TEST] |=
12460 TG3_JMB_LOOPBACK_FAILED;
Matt Carlson941ec902011-08-19 13:58:23 +000012461 }
12462
Matt Carlson5e5a7f32011-08-19 13:58:21 +000012463 /* Re-enable gphy autopowerdown. */
12464 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12465 tg3_phy_toggle_apd(tp, true);
12466 }
Matt Carlson6833c042008-11-21 17:18:59 -080012467
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012468 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12469 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
Matt Carlson28a45952011-08-19 13:58:22 +000012470
Matt Carlsonab789042011-01-25 15:58:54 +000012471done:
12472 tp->phy_flags |= eee_cap;
12473
Michael Chan9f40dea2005-09-05 17:53:06 -070012474 return err;
12475}
12476
Michael Chan4cafd3f2005-05-29 14:56:34 -070012477static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12478 u64 *data)
12479{
Michael Chan566f86a2005-05-29 14:56:58 -070012480 struct tg3 *tp = netdev_priv(dev);
Matt Carlson941ec902011-08-19 13:58:23 +000012481 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
Michael Chan566f86a2005-05-29 14:56:58 -070012482
Matt Carlsonbed98292011-07-13 09:27:29 +000012483 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12484 tg3_power_up(tp)) {
12485 etest->flags |= ETH_TEST_FL_FAILED;
12486 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12487 return;
12488 }
Michael Chanbc1c7562006-03-20 17:48:03 -080012489
Michael Chan566f86a2005-05-29 14:56:58 -070012490 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12491
12492 if (tg3_test_nvram(tp) != 0) {
12493 etest->flags |= ETH_TEST_FL_FAILED;
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012494 data[TG3_NVRAM_TEST] = 1;
Michael Chan566f86a2005-05-29 14:56:58 -070012495 }
Matt Carlson941ec902011-08-19 13:58:23 +000012496 if (!doextlpbk && tg3_test_link(tp)) {
Michael Chanca430072005-05-29 14:57:23 -070012497 etest->flags |= ETH_TEST_FL_FAILED;
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012498 data[TG3_LINK_TEST] = 1;
Michael Chanca430072005-05-29 14:57:23 -070012499 }
Michael Chana71116d2005-05-29 14:58:11 -070012500 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012501 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070012502
Michael Chanbbe832c2005-06-24 20:20:04 -070012503 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012504 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070012505 tg3_netif_stop(tp);
12506 irq_sync = 1;
12507 }
12508
12509 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070012510
12511 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080012512 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070012513 tg3_halt_cpu(tp, RX_CPU_BASE);
Joe Perches63c3a662011-04-26 08:12:10 +000012514 if (!tg3_flag(tp, 5705_PLUS))
Michael Chana71116d2005-05-29 14:58:11 -070012515 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080012516 if (!err)
12517 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070012518
Matt Carlsonf07e9af2010-08-02 11:26:07 +000012519 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
Michael Chand9ab5ad12006-03-20 22:27:35 -080012520 tg3_phy_reset(tp);
12521
Michael Chana71116d2005-05-29 14:58:11 -070012522 if (tg3_test_registers(tp) != 0) {
12523 etest->flags |= ETH_TEST_FL_FAILED;
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012524 data[TG3_REGISTER_TEST] = 1;
Michael Chana71116d2005-05-29 14:58:11 -070012525 }
Matt Carlson28a45952011-08-19 13:58:22 +000012526
Michael Chan7942e1d2005-05-29 14:58:36 -070012527 if (tg3_test_memory(tp) != 0) {
12528 etest->flags |= ETH_TEST_FL_FAILED;
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012529 data[TG3_MEMORY_TEST] = 1;
Michael Chan7942e1d2005-05-29 14:58:36 -070012530 }
Matt Carlson28a45952011-08-19 13:58:22 +000012531
Matt Carlson941ec902011-08-19 13:58:23 +000012532 if (doextlpbk)
12533 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12534
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012535 if (tg3_test_loopback(tp, data, doextlpbk))
Michael Chanc76949a2005-05-29 14:58:59 -070012536 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070012537
David S. Millerf47c11e2005-06-24 20:18:35 -070012538 tg3_full_unlock(tp);
12539
Michael Chand4bc3922005-05-29 14:59:20 -070012540 if (tg3_test_interrupt(tp) != 0) {
12541 etest->flags |= ETH_TEST_FL_FAILED;
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +000012542 data[TG3_INTERRUPT_TEST] = 1;
Michael Chand4bc3922005-05-29 14:59:20 -070012543 }
David S. Millerf47c11e2005-06-24 20:18:35 -070012544
12545 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070012546
Michael Chana71116d2005-05-29 14:58:11 -070012547 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12548 if (netif_running(dev)) {
Joe Perches63c3a662011-04-26 08:12:10 +000012549 tg3_flag_set(tp, INIT_COMPLETE);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012550 err2 = tg3_restart_hw(tp, 1);
12551 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070012552 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070012553 }
David S. Millerf47c11e2005-06-24 20:18:35 -070012554
12555 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012556
12557 if (irq_sync && !err2)
12558 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070012559 }
Matt Carlson80096062010-08-02 11:26:06 +000012560 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000012561 tg3_power_down(tp);
Michael Chanbc1c7562006-03-20 17:48:03 -080012562
Michael Chan4cafd3f2005-05-29 14:56:34 -070012563}
12564
Linus Torvalds1da177e2005-04-16 15:20:36 -070012565static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12566{
12567 struct mii_ioctl_data *data = if_mii(ifr);
12568 struct tg3 *tp = netdev_priv(dev);
12569 int err;
12570
Joe Perches63c3a662011-04-26 08:12:10 +000012571 if (tg3_flag(tp, USE_PHYLIB)) {
Matt Carlson3f0e3ad2009-11-02 14:24:36 +000012572 struct phy_device *phydev;
Matt Carlsonf07e9af2010-08-02 11:26:07 +000012573 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012574 return -EAGAIN;
Matt Carlson3f0e3ad2009-11-02 14:24:36 +000012575 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
Richard Cochran28b04112010-07-17 08:48:55 +000012576 return phy_mii_ioctl(phydev, ifr, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012577 }
12578
Matt Carlson33f401a2010-04-05 10:19:27 +000012579 switch (cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012580 case SIOCGMIIPHY:
Matt Carlson882e9792009-09-01 13:21:36 +000012581 data->phy_id = tp->phy_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012582
12583 /* fallthru */
12584 case SIOCGMIIREG: {
12585 u32 mii_regval;
12586
Matt Carlsonf07e9af2010-08-02 11:26:07 +000012587 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012588 break; /* We have no PHY */
12589
Matt Carlson34eea5a2011-04-20 07:57:38 +000012590 if (!netif_running(dev))
Michael Chanbc1c7562006-03-20 17:48:03 -080012591 return -EAGAIN;
12592
David S. Millerf47c11e2005-06-24 20:18:35 -070012593 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012594 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070012595 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012596
12597 data->val_out = mii_regval;
12598
12599 return err;
12600 }
12601
12602 case SIOCSMIIREG:
Matt Carlsonf07e9af2010-08-02 11:26:07 +000012603 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012604 break; /* We have no PHY */
12605
Matt Carlson34eea5a2011-04-20 07:57:38 +000012606 if (!netif_running(dev))
Michael Chanbc1c7562006-03-20 17:48:03 -080012607 return -EAGAIN;
12608
David S. Millerf47c11e2005-06-24 20:18:35 -070012609 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012610 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070012611 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012612
12613 return err;
12614
12615 default:
12616 /* do nothing */
12617 break;
12618 }
12619 return -EOPNOTSUPP;
12620}
12621
David S. Miller15f98502005-05-18 22:49:26 -070012622static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12623{
12624 struct tg3 *tp = netdev_priv(dev);
12625
12626 memcpy(ec, &tp->coal, sizeof(*ec));
12627 return 0;
12628}
12629
Michael Chand244c892005-07-05 14:42:33 -070012630static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12631{
12632 struct tg3 *tp = netdev_priv(dev);
12633 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12634 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12635
Joe Perches63c3a662011-04-26 08:12:10 +000012636 if (!tg3_flag(tp, 5705_PLUS)) {
Michael Chand244c892005-07-05 14:42:33 -070012637 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12638 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12639 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12640 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12641 }
12642
12643 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12644 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12645 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12646 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12647 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12648 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12649 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12650 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12651 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12652 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12653 return -EINVAL;
12654
12655 /* No rx interrupts will be generated if both are zero */
12656 if ((ec->rx_coalesce_usecs == 0) &&
12657 (ec->rx_max_coalesced_frames == 0))
12658 return -EINVAL;
12659
12660 /* No tx interrupts will be generated if both are zero */
12661 if ((ec->tx_coalesce_usecs == 0) &&
12662 (ec->tx_max_coalesced_frames == 0))
12663 return -EINVAL;
12664
12665 /* Only copy relevant parameters, ignore all others. */
12666 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12667 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12668 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12669 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12670 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12671 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12672 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12673 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12674 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12675
12676 if (netif_running(dev)) {
12677 tg3_full_lock(tp, 0);
12678 __tg3_set_coalesce(tp, &tp->coal);
12679 tg3_full_unlock(tp);
12680 }
12681 return 0;
12682}
12683
Jeff Garzik7282d492006-09-13 14:30:00 -040012684static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012685 .get_settings = tg3_get_settings,
12686 .set_settings = tg3_set_settings,
12687 .get_drvinfo = tg3_get_drvinfo,
12688 .get_regs_len = tg3_get_regs_len,
12689 .get_regs = tg3_get_regs,
12690 .get_wol = tg3_get_wol,
12691 .set_wol = tg3_set_wol,
12692 .get_msglevel = tg3_get_msglevel,
12693 .set_msglevel = tg3_set_msglevel,
12694 .nway_reset = tg3_nway_reset,
12695 .get_link = ethtool_op_get_link,
12696 .get_eeprom_len = tg3_get_eeprom_len,
12697 .get_eeprom = tg3_get_eeprom,
12698 .set_eeprom = tg3_set_eeprom,
12699 .get_ringparam = tg3_get_ringparam,
12700 .set_ringparam = tg3_set_ringparam,
12701 .get_pauseparam = tg3_get_pauseparam,
12702 .set_pauseparam = tg3_set_pauseparam,
Michael Chan4cafd3f2005-05-29 14:56:34 -070012703 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012704 .get_strings = tg3_get_strings,
stephen hemminger81b87092011-04-04 08:43:50 +000012705 .set_phys_id = tg3_set_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012706 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070012707 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070012708 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070012709 .get_sset_count = tg3_get_sset_count,
Matt Carlson90415472011-12-16 13:33:23 +000012710 .get_rxnfc = tg3_get_rxnfc,
12711 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12712 .get_rxfh_indir = tg3_get_rxfh_indir,
12713 .set_rxfh_indir = tg3_set_rxfh_indir,
Michael Chan09681692012-09-28 07:12:42 +000012714 .get_channels = tg3_get_channels,
12715 .set_channels = tg3_set_channels,
Richard Cochran3f847492012-04-03 22:59:39 +000012716 .get_ts_info = ethtool_op_get_ts_info,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012717};
12718
David S. Millerb4017c52012-03-01 17:57:40 -050012719static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12720 struct rtnl_link_stats64 *stats)
12721{
12722 struct tg3 *tp = netdev_priv(dev);
12723
David S. Millerb4017c52012-03-01 17:57:40 -050012724 spin_lock_bh(&tp->lock);
Michael Chan0f566b22012-07-29 19:15:44 +000012725 if (!tp->hw_stats) {
12726 spin_unlock_bh(&tp->lock);
12727 return &tp->net_stats_prev;
12728 }
12729
David S. Millerb4017c52012-03-01 17:57:40 -050012730 tg3_get_nstats(tp, stats);
12731 spin_unlock_bh(&tp->lock);
12732
12733 return stats;
12734}
12735
Matt Carlsonccd5ba92012-02-13 10:20:08 +000012736static void tg3_set_rx_mode(struct net_device *dev)
12737{
12738 struct tg3 *tp = netdev_priv(dev);
12739
12740 if (!netif_running(dev))
12741 return;
12742
12743 tg3_full_lock(tp, 0);
12744 __tg3_set_rx_mode(dev);
12745 tg3_full_unlock(tp);
12746}
12747
Matt Carlsonfaf16272012-02-13 10:20:07 +000012748static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12749 int new_mtu)
12750{
12751 dev->mtu = new_mtu;
12752
12753 if (new_mtu > ETH_DATA_LEN) {
12754 if (tg3_flag(tp, 5780_CLASS)) {
12755 netdev_update_features(dev);
12756 tg3_flag_clear(tp, TSO_CAPABLE);
12757 } else {
12758 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12759 }
12760 } else {
12761 if (tg3_flag(tp, 5780_CLASS)) {
12762 tg3_flag_set(tp, TSO_CAPABLE);
12763 netdev_update_features(dev);
12764 }
12765 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12766 }
12767}
12768
/* ndo_change_mtu hook: validate and apply a new MTU.
 *
 * If the device is down, only the software state is updated; the
 * hardware picks it up on the next open.  Otherwise the chip must be
 * halted and fully restarted around the change, since the ring/DMA
 * configuration depends on the MTU.  Returns 0 or a negative errno
 * from tg3_restart_hw().
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce the PHY and the datapath before touching the chip. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* Restart the PHY outside the full lock, and only on success. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
12813
/* net_device_ops dispatch table wiring the stack's callbacks to the
 * tg3 implementations above.
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
12831
Linus Torvalds1da177e2005-04-16 15:20:36 -070012832static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12833{
Michael Chan1b277772006-03-20 22:27:48 -080012834 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012835
12836 tp->nvram_size = EEPROM_CHIP_SIZE;
12837
Matt Carlsone4f34112009-02-25 14:25:00 +000012838 if (tg3_nvram_read(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012839 return;
12840
Michael Chanb16250e2006-09-27 16:10:14 -070012841 if ((magic != TG3_EEPROM_MAGIC) &&
12842 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12843 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012844 return;
12845
12846 /*
12847 * Size the chip by reading offsets at increasing powers of two.
12848 * When we encounter our validation signature, we know the addressing
12849 * has wrapped around, and thus have our chip size.
12850 */
Michael Chan1b277772006-03-20 22:27:48 -080012851 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012852
12853 while (cursize < tp->nvram_size) {
Matt Carlsone4f34112009-02-25 14:25:00 +000012854 if (tg3_nvram_read(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012855 return;
12856
Michael Chan18201802006-03-20 22:29:15 -080012857 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012858 break;
12859
12860 cursize <<= 1;
12861 }
12862
12863 tp->nvram_size = cursize;
12864}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012865
Linus Torvalds1da177e2005-04-16 15:20:36 -070012866static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12867{
12868 u32 val;
12869
Joe Perches63c3a662011-04-26 08:12:10 +000012870 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080012871 return;
12872
12873 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080012874 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080012875 tg3_get_eeprom_size(tp);
12876 return;
12877 }
12878
Matt Carlson6d348f22009-02-25 14:25:52 +000012879 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012880 if (val != 0) {
Matt Carlson6d348f22009-02-25 14:25:52 +000012881 /* This is confusing. We want to operate on the
12882 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12883 * call will read from NVRAM and byteswap the data
12884 * according to the byteswapping settings for all
12885 * other register accesses. This ensures the data we
12886 * want will always reside in the lower 16-bits.
12887 * However, the data in NVRAM is in LE format, which
12888 * means the data from the NVRAM read will always be
12889 * opposite the endianness of the CPU. The 16-bit
12890 * byteswap then brings the data to CPU endianness.
12891 */
12892 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012893 return;
12894 }
12895 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070012896 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012897}
12898
/* Decode NVRAM_CFG1 for pre-5752 devices: record the flash vendor
 * (JEDEC id) and page size, and set NVRAM_BUFFERED/FLASH flags.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: disable the compatibility bypass
		 * so EEPROM accesses go through the normal path.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Only 5750/5780-class parts encode the vendor here;
		 * an unknown vendor code leaves the fields untouched.
		 */
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Older parts: assume buffered Atmel flash. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
12949
Matt Carlsona1b950d2009-09-01 13:20:17 +000012950static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12951{
12952 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12953 case FLASH_5752PAGE_SIZE_256:
12954 tp->nvram_pagesize = 256;
12955 break;
12956 case FLASH_5752PAGE_SIZE_512:
12957 tp->nvram_pagesize = 512;
12958 break;
12959 case FLASH_5752PAGE_SIZE_1K:
12960 tp->nvram_pagesize = 1024;
12961 break;
12962 case FLASH_5752PAGE_SIZE_2K:
12963 tp->nvram_pagesize = 2048;
12964 break;
12965 case FLASH_5752PAGE_SIZE_4K:
12966 tp->nvram_pagesize = 4096;
12967 break;
12968 case FLASH_5752PAGE_SIZE_264:
12969 tp->nvram_pagesize = 264;
12970 break;
12971 case FLASH_5752PAGE_SIZE_528:
12972 tp->nvram_pagesize = 528;
12973 break;
12974 }
12975}
12976
/* Decode NVRAM_CFG1 for 5752 devices: vendor/part detection plus the
 * TPM write-protect bit.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
13017
/* Decode NVRAM_CFG1 for 5755 devices.  In addition to vendor and page
 * size, the total size is derived from the exact part number; a
 * write-protected (TPM) part exposes a smaller usable region.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
13073
/* Decode NVRAM_CFG1 for 5787 devices: vendor, page size, and the
 * buffered/flash flags.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts must not use the compatibility bypass. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
13111
/* Decode NVRAM_CFG1 for 5761 devices.  When the part is TPM-protected
 * the usable size comes from the NVRAM_ADDR_LOCKOUT register;
 * otherwise it is inferred from the part number.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* Atmel parts here use linear, not page-based, addressing. */
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
13186
/* 5906 devices have a fixed NVRAM configuration: buffered Atmel
 * AT24C512-style EEPROM, no probing required.
 */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
13193
/* Decode NVRAM_CFG1 for 57780-class devices.  Flash parts get their
 * size from a nested switch on the exact part number; an unknown
 * encoding means no usable NVRAM at all.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte page parts. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13265
13266
/* Decode NVRAM_CFG1 for 5717/5719 devices.  Parts without an explicit
 * size case here are sized later (see tg3_nvram_get_size() note in
 * the nested switches); unknown encodings mean no NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte page parts. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13344
/* Decode NVRAM_CFG1 for 5720 devices: vendor, part-specific size, and
 * flash/buffered flags.  Unknown encodings mean no usable NVRAM.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		/* HD = high-density part, LD = low-density part. */
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte page parts. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13456
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe-time NVRAM setup: reset the EEPROM state machine, enable
 * seeprom access, then dispatch to the per-ASIC decode routine above
 * to fill in vendor, page size and total size.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* NVRAM accesses are arbitrated with firmware via a lock. */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Some decode paths leave the size for probing. */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701 have a raw EEPROM rather than NVRAM. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
13522
/* Maps a PCI (subsystem vendor, subsystem device) ID pair to a
 * hard-coded PHY ID, used by tg3_lookup_by_subsys() as a fallback
 * when no valid EEPROM signature is found.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;	/* TG3_PHY_ID_* value; 0 entries are treated as
			 * serdes devices by tg3_phy_probe().
			 */
};
13527
/* Known board (subsystem vendor/device) -> PHY ID fallback table,
 * searched linearly by tg3_lookup_by_subsys().  A phy_id of 0 marks
 * boards whose PHY is a serdes device (see tg3_phy_probe()).
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
13591
Matt Carlson24daf2b2010-02-17 15:17:02 +000013592static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013593{
13594 int i;
13595
13596 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13597 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13598 tp->pdev->subsystem_vendor) &&
13599 (subsys_id_to_phy_id[i].subsys_devid ==
13600 tp->pdev->subsystem_device))
13601 return &subsys_id_to_phy_id[i];
13602 }
13603 return NULL;
13604}
13605
/* Read the hardware configuration the bootcode left in NIC SRAM (or,
 * on 5906, in the VCPU shadow register) and translate it into driver
 * state: PHY ID, LED mode, WOL/ASF/APE capability flags and various
 * PHY workaround flags.  Falls back to conservative defaults when no
 * valid SRAM signature is present.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	/* Defaults, overridden below if the bootcode data says otherwise. */
	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	/* 5906 keeps its config in the VCPU shadow register rather than
	 * NIC SRAM, so it is handled entirely here and jumps to done.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			/* Not a LOM: writable EEPROM, standalone NIC. */
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists for bootcode versions in (0, 0x100)
		 * on chips newer than 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two SRAM halves into the
		 * same bit layout tg3_phy_probe() builds from MII_PHYSID*.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 do not get the extra PHY LED bits. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards override the LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Specific Arima boards are writable despite the
			 * WP bit being set in SRAM.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards are only WOL capable if the bootcode
		 * explicitly says so.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		/* RGMII strapping flags (cfg4 is only populated on 5785). */
		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Publish the final WOL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
13805
Matt Carlsonb2a5c192008-04-03 21:44:44 -070013806static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13807{
13808 int i;
13809 u32 val;
13810
13811 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13812 tw32(OTP_CTRL, cmd);
13813
13814 /* Wait for up to 1 ms for command to execute. */
13815 for (i = 0; i < 100; i++) {
13816 val = tr32(OTP_STATUS);
13817 if (val & OTP_STATUS_CMD_DONE)
13818 break;
13819 udelay(10);
13820 }
13821
13822 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13823}
13824
13825/* Read the gphy configuration from the OTP region of the chip. The gphy
13826 * configuration is a 32-bit value that straddles the alignment boundary.
13827 * We do two 32-bit reads and then shift and merge the results.
13828 */
13829static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13830{
13831 u32 bhalf_otp, thalf_otp;
13832
13833 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13834
13835 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13836 return 0;
13837
13838 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13839
13840 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13841 return 0;
13842
13843 thalf_otp = tr32(OTP_READ_DATA);
13844
13845 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13846
13847 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13848 return 0;
13849
13850 bhalf_otp = tr32(OTP_READ_DATA);
13851
13852 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13853}
13854
Matt Carlsone256f8a2011-03-09 16:58:24 +000013855static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13856{
Hiroaki SHIMODA202ff1c2011-11-22 04:05:41 +000013857 u32 adv = ADVERTISED_Autoneg;
Matt Carlsone256f8a2011-03-09 16:58:24 +000013858
13859 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13860 adv |= ADVERTISED_1000baseT_Half |
13861 ADVERTISED_1000baseT_Full;
13862
13863 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13864 adv |= ADVERTISED_100baseT_Half |
13865 ADVERTISED_100baseT_Full |
13866 ADVERTISED_10baseT_Half |
13867 ADVERTISED_10baseT_Full |
13868 ADVERTISED_TP;
13869 else
13870 adv |= ADVERTISED_FIBRE;
13871
13872 tp->link_config.advertising = adv;
Matt Carlsone7405222012-02-13 15:20:16 +000013873 tp->link_config.speed = SPEED_UNKNOWN;
13874 tp->link_config.duplex = DUPLEX_UNKNOWN;
Matt Carlsone256f8a2011-03-09 16:58:24 +000013875 tp->link_config.autoneg = AUTONEG_ENABLE;
Matt Carlsone7405222012-02-13 15:20:16 +000013876 tp->link_config.active_speed = SPEED_UNKNOWN;
13877 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
Matt Carlson34655ad2012-02-22 12:35:18 +000013878
13879 tp->old_link = -1;
Matt Carlsone256f8a2011-03-09 16:58:24 +000013880}
13881
/* Identify the PHY attached to this device and set up the initial link
 * configuration.  The PHY ID is taken, in order of preference, from the
 * MII ID registers, the value tg3_get_eeprom_hw_cfg() found in SRAM, or
 * the hard-coded subsystem-ID table.  Returns 0 on success, -ENODEV if
 * no PHY ID source matches, or the error from PHY reset/DSP init.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* With APE firmware present, each PCI function uses its own
	 * APE lock to arbitrate PHY access.
	 */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* When phylib manages the PHY, defer everything to it. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Same packed PHY-ID layout as built from SRAM in
		 * tg3_get_eeprom_hw_cfg().
		 */
		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* Zero table entries denote serdes boards. */
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE-capable copper chips/revisions. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	/* Copper PHY with no management firmware: reset it and restart
	 * autonegotiation unless the link is already up (BMSR is read
	 * twice because link status is latched).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is deliberately issued a
		 * second time here (historical behavior for the 5401);
		 * do not collapse into a single call without confirming.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
14009
/* Parse the PCI VPD block: extract the board part number into
 * tp->board_part_number and, on Dell (MFR_ID "1028") boards, append
 * the vendor firmware string to tp->fw_ver.  If no usable VPD part
 * number is found, fall back to a name derived from the PCI device
 * ID, or "none".
 */
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	/* Caller of tg3_vpd_readblock() owns the buffer; freed below. */
	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD section; i then indexes its start. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* Dell boards (manufacturer ID "1028") carry a firmware version
	 * string in the VENDOR0 keyword; copy it into fw_ver.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	/* VPD yielded a part number: done.  Otherwise fall through to
	 * the device-ID based defaults below.
	 */
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
14130
Matt Carlson9c8a6202007-10-21 16:16:08 -070014131static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14132{
14133 u32 val;
14134
Matt Carlsone4f34112009-02-25 14:25:00 +000014135 if (tg3_nvram_read(tp, offset, &val) ||
Matt Carlson9c8a6202007-10-21 16:16:08 -070014136 (val & 0xfc000000) != 0x0c000000 ||
Matt Carlsone4f34112009-02-25 14:25:00 +000014137 tg3_nvram_read(tp, offset + 4, &val) ||
Matt Carlson9c8a6202007-10-21 16:16:08 -070014138 val != 0)
14139 return 0;
14140
14141 return 1;
14142}
14143
/* Read the bootcode firmware version from NVRAM and append it to
 * tp->fw_ver.  Returns silently on any NVRAM read failure, leaving
 * whatever is already in tp->fw_ver (e.g. the VPD version) untouched.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc holds the bootcode image offset; word 0x4 is used as
	 * the base when rebasing the version-string offset below.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* A 0x0c000000 signature followed by a zero word marks the newer
	 * image layout that carries an embedded 16-byte version string.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after any version text already present. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* The embedded string is 16 bytes; bail if it cannot fit. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			/* Copy big-endian words verbatim into fw_ver. */
			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Older layout: major/minor packed into one NVRAM word. */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
14195
Matt Carlsona6f6cb12009-02-25 14:27:43 +000014196static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14197{
14198 u32 val, major, minor;
14199
14200 /* Use native endian representation */
14201 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14202 return;
14203
14204 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14205 TG3_NVM_HWSB_CFG1_MAJSFT;
14206 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14207 TG3_NVM_HWSB_CFG1_MINSFT;
14208
14209 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14210}
14211
Matt Carlsondfe00d72008-11-21 17:19:41 -080014212static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14213{
14214 u32 offset, major, minor, build;
14215
Matt Carlson75f99362010-04-05 10:19:24 +000014216 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
Matt Carlsondfe00d72008-11-21 17:19:41 -080014217
14218 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14219 return;
14220
14221 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14222 case TG3_EEPROM_SB_REVISION_0:
14223 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14224 break;
14225 case TG3_EEPROM_SB_REVISION_2:
14226 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14227 break;
14228 case TG3_EEPROM_SB_REVISION_3:
14229 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14230 break;
Matt Carlsona4153d42010-02-17 15:16:56 +000014231 case TG3_EEPROM_SB_REVISION_4:
14232 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14233 break;
14234 case TG3_EEPROM_SB_REVISION_5:
14235 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14236 break;
Matt Carlsonbba226a2010-10-14 10:37:38 +000014237 case TG3_EEPROM_SB_REVISION_6:
14238 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14239 break;
Matt Carlsondfe00d72008-11-21 17:19:41 -080014240 default:
14241 return;
14242 }
14243
Matt Carlsone4f34112009-02-25 14:25:00 +000014244 if (tg3_nvram_read(tp, offset, &val))
Matt Carlsondfe00d72008-11-21 17:19:41 -080014245 return;
14246
14247 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14248 TG3_EEPROM_SB_EDH_BLD_SHFT;
14249 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14250 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14251 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14252
14253 if (minor > 99 || build > 26)
14254 return;
14255
Matt Carlson75f99362010-04-05 10:19:24 +000014256 offset = strlen(tp->fw_ver);
14257 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14258 " v%d.%02d", major, minor);
Matt Carlsondfe00d72008-11-21 17:19:41 -080014259
14260 if (build > 0) {
Matt Carlson75f99362010-04-05 10:19:24 +000014261 offset = strlen(tp->fw_ver);
14262 if (offset < TG3_VER_SIZE - 1)
14263 tp->fw_ver[offset] = 'a' + build - 1;
Matt Carlsondfe00d72008-11-21 17:19:41 -080014264 }
14265}
14266
Matt Carlsonacd9c112009-02-25 14:26:33 +000014267static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
Michael Chanc4e65752006-03-20 22:29:32 -080014268{
14269 u32 val, offset, start;
Matt Carlsonacd9c112009-02-25 14:26:33 +000014270 int i, vlen;
Matt Carlson9c8a6202007-10-21 16:16:08 -070014271
14272 for (offset = TG3_NVM_DIR_START;
14273 offset < TG3_NVM_DIR_END;
14274 offset += TG3_NVM_DIRENT_SIZE) {
Matt Carlsone4f34112009-02-25 14:25:00 +000014275 if (tg3_nvram_read(tp, offset, &val))
Matt Carlson9c8a6202007-10-21 16:16:08 -070014276 return;
14277
14278 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14279 break;
14280 }
14281
14282 if (offset == TG3_NVM_DIR_END)
14283 return;
14284
Joe Perches63c3a662011-04-26 08:12:10 +000014285 if (!tg3_flag(tp, 5705_PLUS))
Matt Carlson9c8a6202007-10-21 16:16:08 -070014286 start = 0x08000000;
Matt Carlsone4f34112009-02-25 14:25:00 +000014287 else if (tg3_nvram_read(tp, offset - 4, &start))
Matt Carlson9c8a6202007-10-21 16:16:08 -070014288 return;
14289
Matt Carlsone4f34112009-02-25 14:25:00 +000014290 if (tg3_nvram_read(tp, offset + 4, &offset) ||
Matt Carlson9c8a6202007-10-21 16:16:08 -070014291 !tg3_fw_img_is_valid(tp, offset) ||
Matt Carlsone4f34112009-02-25 14:25:00 +000014292 tg3_nvram_read(tp, offset + 8, &val))
Matt Carlson9c8a6202007-10-21 16:16:08 -070014293 return;
14294
14295 offset += val - start;
14296
Matt Carlsonacd9c112009-02-25 14:26:33 +000014297 vlen = strlen(tp->fw_ver);
Matt Carlson9c8a6202007-10-21 16:16:08 -070014298
Matt Carlsonacd9c112009-02-25 14:26:33 +000014299 tp->fw_ver[vlen++] = ',';
14300 tp->fw_ver[vlen++] = ' ';
Matt Carlson9c8a6202007-10-21 16:16:08 -070014301
14302 for (i = 0; i < 4; i++) {
Matt Carlsona9dc5292009-02-25 14:25:30 +000014303 __be32 v;
14304 if (tg3_nvram_read_be32(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070014305 return;
14306
Al Virob9fc7dc2007-12-17 22:59:57 -080014307 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070014308
Matt Carlsonacd9c112009-02-25 14:26:33 +000014309 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14310 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
Matt Carlson9c8a6202007-10-21 16:16:08 -070014311 break;
14312 }
14313
Matt Carlsonacd9c112009-02-25 14:26:33 +000014314 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14315 vlen += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070014316 }
Matt Carlsonacd9c112009-02-25 14:26:33 +000014317}
14318
Michael Chan165f4d12012-07-16 16:23:59 +000014319static void __devinit tg3_probe_ncsi(struct tg3 *tp)
Matt Carlson7fd76442009-02-25 14:27:20 +000014320{
Matt Carlson7fd76442009-02-25 14:27:20 +000014321 u32 apedata;
Matt Carlson7fd76442009-02-25 14:27:20 +000014322
14323 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14324 if (apedata != APE_SEG_SIG_MAGIC)
14325 return;
14326
14327 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14328 if (!(apedata & APE_FW_STATUS_READY))
14329 return;
14330
Michael Chan165f4d12012-07-16 16:23:59 +000014331 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14332 tg3_flag_set(tp, APE_HAS_NCSI);
14333}
14334
14335static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14336{
14337 int vlen;
14338 u32 apedata;
14339 char *fwtype;
14340
Matt Carlson7fd76442009-02-25 14:27:20 +000014341 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14342
Michael Chan165f4d12012-07-16 16:23:59 +000014343 if (tg3_flag(tp, APE_HAS_NCSI))
Matt Carlsonecc79642010-08-02 11:26:01 +000014344 fwtype = "NCSI";
Michael Chan165f4d12012-07-16 16:23:59 +000014345 else
Matt Carlsonecc79642010-08-02 11:26:01 +000014346 fwtype = "DASH";
14347
Matt Carlson7fd76442009-02-25 14:27:20 +000014348 vlen = strlen(tp->fw_ver);
14349
Matt Carlsonecc79642010-08-02 11:26:01 +000014350 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14351 fwtype,
Matt Carlson7fd76442009-02-25 14:27:20 +000014352 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14353 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14354 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14355 (apedata & APE_FW_VERSION_BLDMSK));
14356}
14357
Matt Carlsonacd9c112009-02-25 14:26:33 +000014358static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14359{
14360 u32 val;
Matt Carlson75f99362010-04-05 10:19:24 +000014361 bool vpd_vers = false;
14362
14363 if (tp->fw_ver[0] != 0)
14364 vpd_vers = true;
Matt Carlsonacd9c112009-02-25 14:26:33 +000014365
Joe Perches63c3a662011-04-26 08:12:10 +000014366 if (tg3_flag(tp, NO_NVRAM)) {
Matt Carlson75f99362010-04-05 10:19:24 +000014367 strcat(tp->fw_ver, "sb");
Matt Carlsondf259d82009-04-20 06:57:14 +000014368 return;
14369 }
14370
Matt Carlsonacd9c112009-02-25 14:26:33 +000014371 if (tg3_nvram_read(tp, 0, &val))
14372 return;
14373
14374 if (val == TG3_EEPROM_MAGIC)
14375 tg3_read_bc_ver(tp);
14376 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14377 tg3_read_sb_ver(tp, val);
Matt Carlsona6f6cb12009-02-25 14:27:43 +000014378 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14379 tg3_read_hwsb_ver(tp);
Matt Carlsonacd9c112009-02-25 14:26:33 +000014380
Michael Chan165f4d12012-07-16 16:23:59 +000014381 if (tg3_flag(tp, ENABLE_ASF)) {
14382 if (tg3_flag(tp, ENABLE_APE)) {
14383 tg3_probe_ncsi(tp);
14384 if (!vpd_vers)
14385 tg3_read_dash_ver(tp);
14386 } else if (!vpd_vers) {
14387 tg3_read_mgmtfw_ver(tp);
14388 }
Matt Carlsonc9cab242011-07-13 09:27:27 +000014389 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070014390
14391 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080014392}
14393
Matt Carlson7cb32cf2010-09-30 10:34:36 +000014394static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14395{
Joe Perches63c3a662011-04-26 08:12:10 +000014396 if (tg3_flag(tp, LRG_PROD_RING_CAP))
Matt Carlsonde9f5232011-04-05 14:22:43 +000014397 return TG3_RX_RET_MAX_SIZE_5717;
Joe Perches63c3a662011-04-26 08:12:10 +000014398 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
Matt Carlsonde9f5232011-04-05 14:22:43 +000014399 return TG3_RX_RET_MAX_SIZE_5700;
Matt Carlson7cb32cf2010-09-30 10:34:36 +000014400 else
Matt Carlsonde9f5232011-04-05 14:22:43 +000014401 return TG3_RX_RET_MAX_SIZE_5705;
Matt Carlson7cb32cf2010-09-30 10:34:36 +000014402}
14403
/* Host bridges known to reorder posted writes.  When one of these is
 * present (and the NIC is not PCI Express), the MBOX_WRITE_REORDER
 * flag is set so every mailbox register write is read back to force
 * ordering -- see the check in tg3_get_invariants().
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
14410
/* Locate the other PCI function of a dual-port device sharing the slot
 * with tp->pdev.  Returns the peer's pci_dev, or tp->pdev itself when
 * the device is configured single-port.
 *
 * Refcounting: the loop keeps the reference only on the iteration that
 * finds a peer (break skips the pci_dev_put); that reference is then
 * dropped before returning, per the comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
14438
/* Determine tp->pci_chip_rev_id from the misc host control register
 * (or, for newer parts, the product-ID config register) and derive the
 * chip-family capability flags from it.
 *
 * NOTE: the flag derivations at the bottom are order-dependent --
 * 57765_PLUS reads 57765_CLASS/5717_PLUS, 5755_PLUS reads 57765_PLUS,
 * and 5750_PLUS reads 5755_PLUS/5780_CLASS -- so the checks must stay
 * in this sequence.
 */
static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Select which product-ID register carries the real
		 * ASIC revision for this device ID.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 is treated as a 5720 A0 throughout the driver. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
14519
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000014520static bool tg3_10_100_only_device(struct tg3 *tp,
14521 const struct pci_device_id *ent)
14522{
14523 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14524
14525 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14526 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14527 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14528 return true;
14529
14530 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14531 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14532 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14533 return true;
14534 } else {
14535 return true;
14536 }
14537 }
14538
14539 return false;
14540}
14541
14542static int __devinit tg3_get_invariants(struct tg3 *tp,
14543 const struct pci_device_id *ent)
Linus Torvalds1da177e2005-04-16 15:20:36 -070014544{
Linus Torvalds1da177e2005-04-16 15:20:36 -070014545 u32 misc_ctrl_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070014546 u32 pci_state_reg, grc_misc_cfg;
14547 u32 val;
14548 u16 pci_cmd;
Matt Carlson5e7dfd02008-11-21 17:18:16 -080014549 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070014550
Linus Torvalds1da177e2005-04-16 15:20:36 -070014551 /* Force memory write invalidate off. If we leave it on,
14552 * then on 5700_BX chips we have to enable a workaround.
14553 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14554 * to match the cacheline size. The Broadcom driver have this
14555 * workaround but turns MWI off all the times so never uses
14556 * it. This seems to suggest that the workaround is insufficient.
14557 */
14558 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14559 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14560 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14561
Matt Carlson16821282011-07-13 09:27:28 +000014562 /* Important! -- Make sure register accesses are byteswapped
14563 * correctly. Also, for those chips that require it, make
14564 * sure that indirect register accesses are enabled before
14565 * the first operation.
Linus Torvalds1da177e2005-04-16 15:20:36 -070014566 */
14567 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14568 &misc_ctrl_reg);
Matt Carlson16821282011-07-13 09:27:28 +000014569 tp->misc_host_ctrl |= (misc_ctrl_reg &
14570 MISC_HOST_CTRL_CHIPREV);
14571 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14572 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014573
Matt Carlson42b123b2012-02-13 15:20:13 +000014574 tg3_detect_asic_rev(tp, misc_ctrl_reg);
Michael Chanff645be2005-04-21 17:09:53 -070014575
Michael Chan68929142005-08-09 20:17:14 -070014576 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14577 * we need to disable memory and use config. cycles
14578 * only to access all registers. The 5702/03 chips
14579 * can mistakenly decode the special cycles from the
14580 * ICH chipsets as memory write cycles, causing corruption
14581 * of register and memory space. Only certain ICH bridges
14582 * will drive special cycles with non-zero data during the
14583 * address phase which can fall within the 5703's address
14584 * range. This is not an ICH bug as the PCI spec allows
14585 * non-zero address during special cycles. However, only
14586 * these ICH bridges are known to drive non-zero addresses
14587 * during special cycles.
14588 *
14589 * Since special cycles do not cross PCI bridges, we only
14590 * enable this workaround if the 5703 is on the secondary
14591 * bus of these ICH bridges.
14592 */
14593 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14594 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14595 static struct tg3_dev_id {
14596 u32 vendor;
14597 u32 device;
14598 u32 rev;
14599 } ich_chipsets[] = {
14600 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14601 PCI_ANY_ID },
14602 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14603 PCI_ANY_ID },
14604 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14605 0xa },
14606 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14607 PCI_ANY_ID },
14608 { },
14609 };
14610 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14611 struct pci_dev *bridge = NULL;
14612
14613 while (pci_id->vendor != 0) {
14614 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14615 bridge);
14616 if (!bridge) {
14617 pci_id++;
14618 continue;
14619 }
14620 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070014621 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070014622 continue;
14623 }
14624 if (bridge->subordinate &&
14625 (bridge->subordinate->number ==
14626 tp->pdev->bus->number)) {
Joe Perches63c3a662011-04-26 08:12:10 +000014627 tg3_flag_set(tp, ICH_WORKAROUND);
Michael Chan68929142005-08-09 20:17:14 -070014628 pci_dev_put(bridge);
14629 break;
14630 }
14631 }
14632 }
14633
Matt Carlson6ff6f812011-05-19 12:12:54 +000014634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Matt Carlson41588ba2008-04-19 18:12:33 -070014635 static struct tg3_dev_id {
14636 u32 vendor;
14637 u32 device;
14638 } bridge_chipsets[] = {
14639 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14640 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14641 { },
14642 };
14643 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14644 struct pci_dev *bridge = NULL;
14645
14646 while (pci_id->vendor != 0) {
14647 bridge = pci_get_device(pci_id->vendor,
14648 pci_id->device,
14649 bridge);
14650 if (!bridge) {
14651 pci_id++;
14652 continue;
14653 }
14654 if (bridge->subordinate &&
14655 (bridge->subordinate->number <=
14656 tp->pdev->bus->number) &&
Yinghai Lub918c622012-05-17 18:51:11 -070014657 (bridge->subordinate->busn_res.end >=
Matt Carlson41588ba2008-04-19 18:12:33 -070014658 tp->pdev->bus->number)) {
Joe Perches63c3a662011-04-26 08:12:10 +000014659 tg3_flag_set(tp, 5701_DMA_BUG);
Matt Carlson41588ba2008-04-19 18:12:33 -070014660 pci_dev_put(bridge);
14661 break;
14662 }
14663 }
14664 }
14665
Michael Chan4a29cc22006-03-19 13:21:12 -080014666 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14667 * DMA addresses > 40-bit. This bridge may have other additional
14668 * 57xx devices behind it in some 4-port NIC designs for example.
14669 * Any tg3 device found behind the bridge will also need the 40-bit
14670 * DMA workaround.
14671 */
Matt Carlson42b123b2012-02-13 15:20:13 +000014672 if (tg3_flag(tp, 5780_CLASS)) {
Joe Perches63c3a662011-04-26 08:12:10 +000014673 tg3_flag_set(tp, 40BIT_DMA_BUG);
Michael Chan4cf78e42005-07-25 12:29:19 -070014674 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Matt Carlson859a588792010-04-05 10:19:28 +000014675 } else {
Michael Chan4a29cc22006-03-19 13:21:12 -080014676 struct pci_dev *bridge = NULL;
14677
14678 do {
14679 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14680 PCI_DEVICE_ID_SERVERWORKS_EPB,
14681 bridge);
14682 if (bridge && bridge->subordinate &&
14683 (bridge->subordinate->number <=
14684 tp->pdev->bus->number) &&
Yinghai Lub918c622012-05-17 18:51:11 -070014685 (bridge->subordinate->busn_res.end >=
Michael Chan4a29cc22006-03-19 13:21:12 -080014686 tp->pdev->bus->number)) {
Joe Perches63c3a662011-04-26 08:12:10 +000014687 tg3_flag_set(tp, 40BIT_DMA_BUG);
Michael Chan4a29cc22006-03-19 13:21:12 -080014688 pci_dev_put(bridge);
14689 break;
14690 }
14691 } while (bridge);
14692 }
Michael Chan4cf78e42005-07-25 12:29:19 -070014693
Matt Carlsonf6eb9b12009-09-01 13:19:53 +000014694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
Matt Carlson3a1e19d2011-07-13 09:27:32 +000014695 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
Michael Chan7544b092007-05-05 13:08:32 -070014696 tp->pdev_peer = tg3_find_peer(tp);
14697
Matt Carlson507399f2009-11-13 13:03:37 +000014698 /* Determine TSO capabilities */
Matt Carlsona0512942011-07-27 14:20:54 +000014699 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
Matt Carlson4d163b72011-01-25 15:58:48 +000014700 ; /* Do nothing. HW bug. */
Joe Perches63c3a662011-04-26 08:12:10 +000014701 else if (tg3_flag(tp, 57765_PLUS))
14702 tg3_flag_set(tp, HW_TSO_3);
14703 else if (tg3_flag(tp, 5755_PLUS) ||
Matt Carlsone849cdc2009-11-13 13:03:38 +000014704 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Joe Perches63c3a662011-04-26 08:12:10 +000014705 tg3_flag_set(tp, HW_TSO_2);
14706 else if (tg3_flag(tp, 5750_PLUS)) {
14707 tg3_flag_set(tp, HW_TSO_1);
14708 tg3_flag_set(tp, TSO_BUG);
Matt Carlson507399f2009-11-13 13:03:37 +000014709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14710 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Joe Perches63c3a662011-04-26 08:12:10 +000014711 tg3_flag_clear(tp, TSO_BUG);
Matt Carlson507399f2009-11-13 13:03:37 +000014712 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14713 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14714 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +000014715 tg3_flag_set(tp, TSO_BUG);
Matt Carlson507399f2009-11-13 13:03:37 +000014716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14717 tp->fw_needed = FIRMWARE_TG3TSO5;
14718 else
14719 tp->fw_needed = FIRMWARE_TG3TSO;
14720 }
14721
Matt Carlsondabc5c62011-05-19 12:12:52 +000014722 /* Selectively allow TSO based on operating conditions */
Matt Carlson6ff6f812011-05-19 12:12:54 +000014723 if (tg3_flag(tp, HW_TSO_1) ||
14724 tg3_flag(tp, HW_TSO_2) ||
14725 tg3_flag(tp, HW_TSO_3) ||
Matt Carlsoncf9ecf42011-11-28 09:41:03 +000014726 tp->fw_needed) {
14727 /* For firmware TSO, assume ASF is disabled.
14728 * We'll disable TSO later if we discover ASF
14729 * is enabled in tg3_get_eeprom_hw_cfg().
14730 */
Matt Carlsondabc5c62011-05-19 12:12:52 +000014731 tg3_flag_set(tp, TSO_CAPABLE);
Matt Carlsoncf9ecf42011-11-28 09:41:03 +000014732 } else {
Matt Carlsondabc5c62011-05-19 12:12:52 +000014733 tg3_flag_clear(tp, TSO_CAPABLE);
14734 tg3_flag_clear(tp, TSO_BUG);
14735 tp->fw_needed = NULL;
14736 }
14737
14738 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14739 tp->fw_needed = FIRMWARE_TG3;
14740
Matt Carlson507399f2009-11-13 13:03:37 +000014741 tp->irq_max = 1;
14742
Joe Perches63c3a662011-04-26 08:12:10 +000014743 if (tg3_flag(tp, 5750_PLUS)) {
14744 tg3_flag_set(tp, SUPPORT_MSI);
Michael Chan7544b092007-05-05 13:08:32 -070014745 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14746 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14747 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14748 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14749 tp->pdev_peer == tp->pdev))
Joe Perches63c3a662011-04-26 08:12:10 +000014750 tg3_flag_clear(tp, SUPPORT_MSI);
Michael Chan7544b092007-05-05 13:08:32 -070014751
Joe Perches63c3a662011-04-26 08:12:10 +000014752 if (tg3_flag(tp, 5755_PLUS) ||
Michael Chanb5d37722006-09-27 16:06:21 -070014753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Joe Perches63c3a662011-04-26 08:12:10 +000014754 tg3_flag_set(tp, 1SHOT_MSI);
Michael Chan52c0fd82006-06-29 20:15:54 -070014755 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070014756
Joe Perches63c3a662011-04-26 08:12:10 +000014757 if (tg3_flag(tp, 57765_PLUS)) {
14758 tg3_flag_set(tp, SUPPORT_MSIX);
Matt Carlson507399f2009-11-13 13:03:37 +000014759 tp->irq_max = TG3_IRQ_MAX_VECS;
14760 }
Matt Carlsonf6eb9b12009-09-01 13:19:53 +000014761 }
Matt Carlson0e1406d2009-11-02 12:33:33 +000014762
Michael Chan91024262012-09-28 07:12:38 +000014763 tp->txq_max = 1;
14764 tp->rxq_max = 1;
14765 if (tp->irq_max > 1) {
14766 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14767 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14768
14769 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14770 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14771 tp->txq_max = tp->irq_max - 1;
14772 }
14773
Matt Carlsonb7abee62012-06-07 12:56:54 +000014774 if (tg3_flag(tp, 5755_PLUS) ||
14775 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Joe Perches63c3a662011-04-26 08:12:10 +000014776 tg3_flag_set(tp, SHORT_DMA_BUG);
Matt Carlsonf6eb9b12009-09-01 13:19:53 +000014777
Matt Carlsone31aa982011-07-27 14:20:53 +000014778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
Matt Carlsona4cb4282011-12-14 11:09:58 +000014779 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
Matt Carlsone31aa982011-07-27 14:20:53 +000014780
Matt Carlsonfa6b2aa2011-11-21 15:01:19 +000014781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14782 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
Joe Perches63c3a662011-04-26 08:12:10 +000014784 tg3_flag_set(tp, LRG_PROD_RING_CAP);
Matt Carlsonde9f5232011-04-05 14:22:43 +000014785
Joe Perches63c3a662011-04-26 08:12:10 +000014786 if (tg3_flag(tp, 57765_PLUS) &&
Matt Carlsona0512942011-07-27 14:20:54 +000014787 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
Joe Perches63c3a662011-04-26 08:12:10 +000014788 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
Matt Carlsonb703df62009-12-03 08:36:21 +000014789
Joe Perches63c3a662011-04-26 08:12:10 +000014790 if (!tg3_flag(tp, 5705_PLUS) ||
14791 tg3_flag(tp, 5780_CLASS) ||
14792 tg3_flag(tp, USE_JUMBO_BDFLAG))
14793 tg3_flag_set(tp, JUMBO_CAPABLE);
Michael Chan0f893dc2005-07-25 12:30:38 -070014794
Matt Carlson52f44902008-11-21 17:17:04 -080014795 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14796 &pci_state_reg);
14797
Jon Mason708ebb3a2011-06-27 12:56:50 +000014798 if (pci_is_pcie(tp->pdev)) {
Matt Carlson5e7dfd02008-11-21 17:18:16 -080014799 u16 lnkctl;
14800
Joe Perches63c3a662011-04-26 08:12:10 +000014801 tg3_flag_set(tp, PCI_EXPRESS);
Matt Carlson5f5c51e2007-11-12 21:19:37 -080014802
Jiang Liu0f49bfb2012-08-20 13:28:20 -060014803 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
Matt Carlson5e7dfd02008-11-21 17:18:16 -080014804 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
Matt Carlson7196cd62011-05-19 16:02:44 +000014805 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14806 ASIC_REV_5906) {
Joe Perches63c3a662011-04-26 08:12:10 +000014807 tg3_flag_clear(tp, HW_TSO_2);
Matt Carlsondabc5c62011-05-19 12:12:52 +000014808 tg3_flag_clear(tp, TSO_CAPABLE);
Matt Carlson7196cd62011-05-19 16:02:44 +000014809 }
Matt Carlson5e7dfd02008-11-21 17:18:16 -080014810 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson321d32a2008-11-21 17:22:19 -080014811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson9cf74eb2009-04-20 06:58:27 +000014812 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14813 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
Joe Perches63c3a662011-04-26 08:12:10 +000014814 tg3_flag_set(tp, CLKREQ_BUG);
Matt Carlson614b0592010-01-20 16:58:02 +000014815 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +000014816 tg3_flag_set(tp, L1PLLPD_EN);
Michael Chanc7835a72006-11-15 21:14:42 -080014817 }
Matt Carlson52f44902008-11-21 17:17:04 -080014818 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Jon Mason708ebb3a2011-06-27 12:56:50 +000014819 /* BCM5785 devices are effectively PCIe devices, and should
14820 * follow PCIe codepaths, but do not have a PCIe capabilities
14821 * section.
Matt Carlson93a700a2011-08-31 11:44:54 +000014822 */
Joe Perches63c3a662011-04-26 08:12:10 +000014823 tg3_flag_set(tp, PCI_EXPRESS);
14824 } else if (!tg3_flag(tp, 5705_PLUS) ||
14825 tg3_flag(tp, 5780_CLASS)) {
Matt Carlson52f44902008-11-21 17:17:04 -080014826 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14827 if (!tp->pcix_cap) {
Matt Carlson2445e462010-04-05 10:19:21 +000014828 dev_err(&tp->pdev->dev,
14829 "Cannot find PCI-X capability, aborting\n");
Matt Carlson52f44902008-11-21 17:17:04 -080014830 return -EIO;
14831 }
14832
14833 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
Joe Perches63c3a662011-04-26 08:12:10 +000014834 tg3_flag_set(tp, PCIX_MODE);
Matt Carlson52f44902008-11-21 17:17:04 -080014835 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070014836
Michael Chan399de502005-10-03 14:02:39 -070014837 /* If we have an AMD 762 or VIA K8T800 chipset, write
14838 * reordering to the mailbox registers done by the host
14839 * controller can cause major troubles. We read back from
14840 * every mailbox register write to force the writes to be
14841 * posted to the chip in order.
14842 */
Matt Carlson41434702011-03-09 16:58:22 +000014843 if (pci_dev_present(tg3_write_reorder_chipsets) &&
Joe Perches63c3a662011-04-26 08:12:10 +000014844 !tg3_flag(tp, PCI_EXPRESS))
14845 tg3_flag_set(tp, MBOX_WRITE_REORDER);
Michael Chan399de502005-10-03 14:02:39 -070014846
Matt Carlson69fc4052008-12-21 20:19:57 -080014847 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14848 &tp->pci_cacheline_sz);
14849 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14850 &tp->pci_lat_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014851 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14852 tp->pci_lat_timer < 64) {
14853 tp->pci_lat_timer = 64;
Matt Carlson69fc4052008-12-21 20:19:57 -080014854 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14855 tp->pci_lat_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014856 }
14857
Matt Carlson16821282011-07-13 09:27:28 +000014858 /* Important! -- It is critical that the PCI-X hw workaround
14859 * situation is decided before the first MMIO register access.
14860 */
Matt Carlson52f44902008-11-21 17:17:04 -080014861 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14862 /* 5700 BX chips need to have their TX producer index
14863 * mailboxes written twice to workaround a bug.
14864 */
Joe Perches63c3a662011-04-26 08:12:10 +000014865 tg3_flag_set(tp, TXD_MBOX_HWBUG);
Matt Carlson9974a352007-10-07 23:27:28 -070014866
Matt Carlson52f44902008-11-21 17:17:04 -080014867 /* If we are in PCI-X mode, enable register write workaround.
Linus Torvalds1da177e2005-04-16 15:20:36 -070014868 *
14869 * The workaround is to use indirect register accesses
14870 * for all chip writes not to mailbox registers.
14871 */
Joe Perches63c3a662011-04-26 08:12:10 +000014872 if (tg3_flag(tp, PCIX_MODE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070014873 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070014874
Joe Perches63c3a662011-04-26 08:12:10 +000014875 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014876
14877 /* The chip can have it's power management PCI config
14878 * space registers clobbered due to this bug.
14879 * So explicitly force the chip into D0 here.
14880 */
Matt Carlson9974a352007-10-07 23:27:28 -070014881 pci_read_config_dword(tp->pdev,
14882 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070014883 &pm_reg);
14884 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14885 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070014886 pci_write_config_dword(tp->pdev,
14887 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070014888 pm_reg);
14889
14890 /* Also, force SERR#/PERR# in PCI command. */
14891 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14892 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14893 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14894 }
14895 }
14896
Linus Torvalds1da177e2005-04-16 15:20:36 -070014897 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
Joe Perches63c3a662011-04-26 08:12:10 +000014898 tg3_flag_set(tp, PCI_HIGH_SPEED);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014899 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
Joe Perches63c3a662011-04-26 08:12:10 +000014900 tg3_flag_set(tp, PCI_32BIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014901
14902 /* Chip-specific fixup from Broadcom driver */
14903 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14904 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14905 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14906 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14907 }
14908
Michael Chan1ee582d2005-08-09 20:16:46 -070014909 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070014910 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070014911 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070014912 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070014913 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070014914 tp->write32_tx_mbox = tg3_write32;
14915 tp->write32_rx_mbox = tg3_write32;
14916
14917 /* Various workaround register access methods */
Joe Perches63c3a662011-04-26 08:12:10 +000014918 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
Michael Chan1ee582d2005-08-09 20:16:46 -070014919 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070014920 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
Joe Perches63c3a662011-04-26 08:12:10 +000014921 (tg3_flag(tp, PCI_EXPRESS) &&
Matt Carlson98efd8a2007-05-05 12:47:25 -070014922 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14923 /*
14924 * Back to back register writes can cause problems on these
14925 * chips, the workaround is to read back all reg writes
14926 * except those to mailbox regs.
14927 *
14928 * See tg3_write_indirect_reg32().
14929 */
Michael Chan1ee582d2005-08-09 20:16:46 -070014930 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070014931 }
14932
Joe Perches63c3a662011-04-26 08:12:10 +000014933 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
Michael Chan1ee582d2005-08-09 20:16:46 -070014934 tp->write32_tx_mbox = tg3_write32_tx_mbox;
Joe Perches63c3a662011-04-26 08:12:10 +000014935 if (tg3_flag(tp, MBOX_WRITE_REORDER))
Michael Chan1ee582d2005-08-09 20:16:46 -070014936 tp->write32_rx_mbox = tg3_write_flush_reg32;
14937 }
Michael Chan20094932005-08-09 20:16:32 -070014938
Joe Perches63c3a662011-04-26 08:12:10 +000014939 if (tg3_flag(tp, ICH_WORKAROUND)) {
Michael Chan68929142005-08-09 20:17:14 -070014940 tp->read32 = tg3_read_indirect_reg32;
14941 tp->write32 = tg3_write_indirect_reg32;
14942 tp->read32_mbox = tg3_read_indirect_mbox;
14943 tp->write32_mbox = tg3_write_indirect_mbox;
14944 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14945 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14946
14947 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070014948 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070014949
14950 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14951 pci_cmd &= ~PCI_COMMAND_MEMORY;
14952 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14953 }
Michael Chanb5d37722006-09-27 16:06:21 -070014954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14955 tp->read32_mbox = tg3_read32_mbox_5906;
14956 tp->write32_mbox = tg3_write32_mbox_5906;
14957 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14958 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14959 }
Michael Chan68929142005-08-09 20:17:14 -070014960
Michael Chanbbadf502006-04-06 21:46:34 -070014961 if (tp->write32 == tg3_write_indirect_reg32 ||
Joe Perches63c3a662011-04-26 08:12:10 +000014962 (tg3_flag(tp, PCIX_MODE) &&
Michael Chanbbadf502006-04-06 21:46:34 -070014963 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070014964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Joe Perches63c3a662011-04-26 08:12:10 +000014965 tg3_flag_set(tp, SRAM_USE_CONFIG);
Michael Chanbbadf502006-04-06 21:46:34 -070014966
Matt Carlson16821282011-07-13 09:27:28 +000014967 /* The memory arbiter has to be enabled in order for SRAM accesses
14968 * to succeed. Normally on powerup the tg3 chip firmware will make
14969 * sure it is enabled, but other entities such as system netboot
14970 * code might disable it.
14971 */
14972 val = tr32(MEMARB_MODE);
14973 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14974
Matt Carlson9dc5e342011-11-04 09:15:02 +000014975 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14977 tg3_flag(tp, 5780_CLASS)) {
14978 if (tg3_flag(tp, PCIX_MODE)) {
14979 pci_read_config_dword(tp->pdev,
14980 tp->pcix_cap + PCI_X_STATUS,
14981 &val);
14982 tp->pci_fn = val & 0x7;
14983 }
14984 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14985 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14986 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14987 NIC_SRAM_CPMUSTAT_SIG) {
14988 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14989 tp->pci_fn = tp->pci_fn ? 1 : 0;
14990 }
14991 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14993 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14994 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14995 NIC_SRAM_CPMUSTAT_SIG) {
14996 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14997 TG3_CPMU_STATUS_FSHFT_5719;
14998 }
Matt Carlson69f11c92011-07-13 09:27:30 +000014999 }
15000
Michael Chan7d0c41e2005-04-21 17:06:20 -070015001 /* Get eeprom hw config before calling tg3_set_power_state().
Joe Perches63c3a662011-04-26 08:12:10 +000015002 * In particular, the TG3_FLAG_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070015003 * determined before calling tg3_set_power_state() so that
15004 * we know whether or not to switch out of Vaux power.
15005 * When the flag is set, it means that GPIO1 is used for eeprom
15006 * write protect and also implies that it is a LOM where GPIOs
15007 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040015008 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070015009 tg3_get_eeprom_hw_cfg(tp);
15010
Matt Carlsoncf9ecf42011-11-28 09:41:03 +000015011 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15012 tg3_flag_clear(tp, TSO_CAPABLE);
15013 tg3_flag_clear(tp, TSO_BUG);
15014 tp->fw_needed = NULL;
15015 }
15016
Joe Perches63c3a662011-04-26 08:12:10 +000015017 if (tg3_flag(tp, ENABLE_APE)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070015018 /* Allow reads and writes to the
15019 * APE register and memory space.
15020 */
15021 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
Matt Carlsonf92d9dc12010-06-05 17:24:30 +000015022 PCISTATE_ALLOW_APE_SHMEM_WR |
15023 PCISTATE_ALLOW_APE_PSPACE_WR;
Matt Carlson0d3031d2007-10-10 18:02:43 -070015024 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15025 pci_state_reg);
Matt Carlsonc9cab242011-07-13 09:27:27 +000015026
15027 tg3_ape_lock_init(tp);
Matt Carlson0d3031d2007-10-10 18:02:43 -070015028 }
15029
Matt Carlson16821282011-07-13 09:27:28 +000015030 /* Set up tp->grc_local_ctrl before calling
15031 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15032 * will bring 5700's external PHY out of reset.
Michael Chan314fba32005-04-21 17:07:04 -070015033 * It is also used as eeprom write protect on LOMs.
15034 */
15035 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
Matt Carlson6ff6f812011-05-19 12:12:54 +000015036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
Joe Perches63c3a662011-04-26 08:12:10 +000015037 tg3_flag(tp, EEPROM_WRITE_PROT))
Michael Chan314fba32005-04-21 17:07:04 -070015038 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15039 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070015040 /* Unused GPIO3 must be driven as output on 5752 because there
15041 * are no pull-up resistors on unused GPIO pins.
15042 */
15043 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15044 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070015045
Matt Carlson321d32a2008-11-21 17:22:19 -080015046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsoncb4ed1f2010-01-20 16:58:09 +000015047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
Matt Carlson55086ad2011-12-14 11:09:59 +000015048 tg3_flag(tp, 57765_CLASS))
Michael Chanaf36e6b2006-03-23 01:28:06 -080015049 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15050
Matt Carlson8d519ab2009-04-20 06:58:01 +000015051 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15052 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
Matt Carlson5f0c4a32008-06-09 15:41:12 -070015053 /* Turn off the debug UART. */
15054 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
Joe Perches63c3a662011-04-26 08:12:10 +000015055 if (tg3_flag(tp, IS_NIC))
Matt Carlson5f0c4a32008-06-09 15:41:12 -070015056 /* Keep VMain power. */
15057 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15058 GRC_LCLCTRL_GPIO_OUTPUT0;
15059 }
15060
Matt Carlson16821282011-07-13 09:27:28 +000015061 /* Switch out of Vaux if it is a NIC */
15062 tg3_pwrsrc_switch_to_vmain(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015063
Linus Torvalds1da177e2005-04-16 15:20:36 -070015064 /* Derive initial jumbo mode from MTU assigned in
15065 * ether_setup() via the alloc_etherdev() call
15066 */
Joe Perches63c3a662011-04-26 08:12:10 +000015067 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15068 tg3_flag_set(tp, JUMBO_RING_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015069
15070 /* Determine WakeOnLan speed to use. */
15071 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15072 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15073 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15074 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
Joe Perches63c3a662011-04-26 08:12:10 +000015075 tg3_flag_clear(tp, WOL_SPEED_100MB);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015076 } else {
Joe Perches63c3a662011-04-26 08:12:10 +000015077 tg3_flag_set(tp, WOL_SPEED_100MB);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015078 }
15079
Matt Carlson7f97a4b2009-08-25 10:10:03 +000015080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015081 tp->phy_flags |= TG3_PHYFLG_IS_FET;
Matt Carlson7f97a4b2009-08-25 10:10:03 +000015082
Linus Torvalds1da177e2005-04-16 15:20:36 -070015083 /* A few boards don't want Ethernet@WireSpeed phy feature */
Matt Carlson6ff6f812011-05-19 12:12:54 +000015084 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15085 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070015086 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070015087 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015088 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15089 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15090 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015091
15092 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15093 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015094 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015095 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015096 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015097
Joe Perches63c3a662011-04-26 08:12:10 +000015098 if (tg3_flag(tp, 5705_PLUS) &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015099 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
Matt Carlson321d32a2008-11-21 17:22:19 -080015100 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
Matt Carlsonf6eb9b12009-09-01 13:19:53 +000015101 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
Joe Perches63c3a662011-04-26 08:12:10 +000015102 !tg3_flag(tp, 57765_PLUS)) {
Michael Chanc424cb22006-04-29 18:56:34 -070015103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070015104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070015105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080015107 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15108 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015109 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080015110 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015111 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
Matt Carlson321d32a2008-11-21 17:22:19 -080015112 } else
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015113 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
Michael Chanc424cb22006-04-29 18:56:34 -070015114 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015115
Matt Carlsonb2a5c192008-04-03 21:44:44 -070015116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15117 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15118 tp->phy_otp = tg3_read_otp_phycfg(tp);
15119 if (tp->phy_otp == 0)
15120 tp->phy_otp = TG3_OTP_DEFAULT;
15121 }
15122
Joe Perches63c3a662011-04-26 08:12:10 +000015123 if (tg3_flag(tp, CPMU_PRESENT))
Matt Carlson8ef21422008-05-02 16:47:53 -070015124 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15125 else
15126 tp->mi_mode = MAC_MI_MODE_BASE;
15127
Linus Torvalds1da177e2005-04-16 15:20:36 -070015128 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015129 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15130 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15131 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15132
Matt Carlson4d958472011-04-20 07:57:35 +000015133 /* Set these bits to enable statistics workaround. */
15134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15135 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15136 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15137 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15138 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15139 }
15140
Matt Carlson321d32a2008-11-21 17:22:19 -080015141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15142 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
Joe Perches63c3a662011-04-26 08:12:10 +000015143 tg3_flag_set(tp, USE_PHYLIB);
Matt Carlson57e69832008-05-25 23:48:31 -070015144
Matt Carlson158d7ab2008-05-29 01:37:54 -070015145 err = tg3_mdio_init(tp);
15146 if (err)
15147 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015148
15149 /* Initialize data/descriptor byte/word swapping. */
15150 val = tr32(GRC_MODE);
Matt Carlsonf2096f92011-04-05 14:22:48 +000015151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15152 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15153 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15154 GRC_MODE_B2HRX_ENABLE |
15155 GRC_MODE_HTX2B_ENABLE |
15156 GRC_MODE_HOST_STACKUP);
15157 else
15158 val &= GRC_MODE_HOST_STACKUP;
15159
Linus Torvalds1da177e2005-04-16 15:20:36 -070015160 tw32(GRC_MODE, val | tp->grc_mode);
15161
15162 tg3_switch_clocks(tp);
15163
15164 /* Clear this out for sanity. */
15165 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15166
15167 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15168 &pci_state_reg);
15169 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
Joe Perches63c3a662011-04-26 08:12:10 +000015170 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070015171 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15172
15173 if (chiprevid == CHIPREV_ID_5701_A0 ||
15174 chiprevid == CHIPREV_ID_5701_B0 ||
15175 chiprevid == CHIPREV_ID_5701_B2 ||
15176 chiprevid == CHIPREV_ID_5701_B5) {
15177 void __iomem *sram_base;
15178
15179 /* Write some dummy words into the SRAM status block
15180 * area, see if it reads back correctly. If the return
15181 * value is bad, force enable the PCIX workaround.
15182 */
15183 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15184
15185 writel(0x00000000, sram_base);
15186 writel(0x00000000, sram_base + 4);
15187 writel(0xffffffff, sram_base + 4);
15188 if (readl(sram_base) != 0x00000000)
Joe Perches63c3a662011-04-26 08:12:10 +000015189 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015190 }
15191 }
15192
15193 udelay(50);
15194 tg3_nvram_init(tp);
15195
15196 grc_misc_cfg = tr32(GRC_MISC_CFG);
15197 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15198
Linus Torvalds1da177e2005-04-16 15:20:36 -070015199 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15200 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15201 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
Joe Perches63c3a662011-04-26 08:12:10 +000015202 tg3_flag_set(tp, IS_5788);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015203
Joe Perches63c3a662011-04-26 08:12:10 +000015204 if (!tg3_flag(tp, IS_5788) &&
Matt Carlson6ff6f812011-05-19 12:12:54 +000015205 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
Joe Perches63c3a662011-04-26 08:12:10 +000015206 tg3_flag_set(tp, TAGGED_STATUS);
15207 if (tg3_flag(tp, TAGGED_STATUS)) {
David S. Millerfac9b832005-05-18 22:46:34 -070015208 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15209 HOSTCC_MODE_CLRTICK_TXBD);
15210
15211 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15212 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15213 tp->misc_host_ctrl);
15214 }
15215
Matt Carlson3bda1252008-08-15 14:08:22 -070015216 /* Preserve the APE MAC_MODE bits */
Joe Perches63c3a662011-04-26 08:12:10 +000015217 if (tg3_flag(tp, ENABLE_APE))
Matt Carlsond2394e6b2010-11-24 08:31:47 +000015218 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
Matt Carlson3bda1252008-08-15 14:08:22 -070015219 else
Matt Carlson6e01b202011-08-19 13:58:20 +000015220 tp->mac_mode = 0;
Matt Carlson3bda1252008-08-15 14:08:22 -070015221
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000015222 if (tg3_10_100_only_device(tp, ent))
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015223 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015224
15225 err = tg3_phy_probe(tp);
15226 if (err) {
Matt Carlson2445e462010-04-05 10:19:21 +000015227 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015228 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070015229 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015230 }
15231
Matt Carlson184b8902010-04-05 10:19:25 +000015232 tg3_read_vpd(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080015233 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015234
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015235 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15236 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015237 } else {
15238 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015239 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015240 else
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015241 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015242 }
15243
15244 /* 5700 {AX,BX} chips have a broken status block link
15245 * change bit implementation, so we must use the
15246 * status register in those cases.
15247 */
15248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
Joe Perches63c3a662011-04-26 08:12:10 +000015249 tg3_flag_set(tp, USE_LINKCHG_REG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015250 else
Joe Perches63c3a662011-04-26 08:12:10 +000015251 tg3_flag_clear(tp, USE_LINKCHG_REG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015252
15253 /* The led_ctrl is set during tg3_phy_probe, here we might
15254 * have to force the link status polling mechanism based
15255 * upon subsystem IDs.
15256 */
15257 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070015258 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015259 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15260 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
Joe Perches63c3a662011-04-26 08:12:10 +000015261 tg3_flag_set(tp, USE_LINKCHG_REG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015262 }
15263
15264 /* For all SERDES we poll the MAC status register. */
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015265 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
Joe Perches63c3a662011-04-26 08:12:10 +000015266 tg3_flag_set(tp, POLL_SERDES);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015267 else
Joe Perches63c3a662011-04-26 08:12:10 +000015268 tg3_flag_clear(tp, POLL_SERDES);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015269
Eric Dumazet9205fd92011-11-18 06:47:01 +000015270 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
Matt Carlsond2757fc2010-04-12 06:58:27 +000015271 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Joe Perches63c3a662011-04-26 08:12:10 +000015273 tg3_flag(tp, PCIX_MODE)) {
Eric Dumazet9205fd92011-11-18 06:47:01 +000015274 tp->rx_offset = NET_SKB_PAD;
Matt Carlsond2757fc2010-04-12 06:58:27 +000015275#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
Matt Carlson9dc7a112010-04-12 06:58:28 +000015276 tp->rx_copy_thresh = ~(u16)0;
Matt Carlsond2757fc2010-04-12 06:58:27 +000015277#endif
15278 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015279
Matt Carlson2c49a442010-09-30 10:34:35 +000015280 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15281 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
Matt Carlson7cb32cf2010-09-30 10:34:36 +000015282 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15283
Matt Carlson2c49a442010-09-30 10:34:35 +000015284 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
Michael Chanf92905d2006-06-29 20:14:29 -070015285
15286 /* Increment the rx prod index on the rx std ring by at most
15287 * 8 for these chips to workaround hw errata.
15288 */
15289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15291 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15292 tp->rx_std_max_post = 8;
15293
Joe Perches63c3a662011-04-26 08:12:10 +000015294 if (tg3_flag(tp, ASPM_WORKAROUND))
Matt Carlson8ed5d972007-05-07 00:25:49 -070015295 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15296 PCIE_PWR_MGMT_L1_THRESH_MSK;
15297
Linus Torvalds1da177e2005-04-16 15:20:36 -070015298 return err;
15299}
15300
David S. Miller49b6e95f2007-03-29 01:38:42 -070015301#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070015302static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15303{
15304 struct net_device *dev = tp->dev;
15305 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070015306 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070015307 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070015308 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015309
David S. Miller49b6e95f2007-03-29 01:38:42 -070015310 addr = of_get_property(dp, "local-mac-address", &len);
15311 if (addr && len == 6) {
15312 memcpy(dev->dev_addr, addr, 6);
15313 memcpy(dev->perm_addr, dev->dev_addr, 6);
15314 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015315 }
15316 return -ENODEV;
15317}
15318
15319static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15320{
15321 struct net_device *dev = tp->dev;
15322
15323 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070015324 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015325 return 0;
15326}
15327#endif
15328
/* Determine the device MAC address, trying sources in decreasing order
 * of preference: OpenFirmware property (SPARC), the bootcode mailbox in
 * NIC SRAM, NVRAM at a chip-specific offset, and finally the live MAC
 * address registers.  Fills dev->dev_addr and dev->perm_addr.
 *
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	/* Firmware-provided address wins outright if present. */
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Select the NVRAM offset of the MAC address for this chip/function.
	 * 0x7c is the default; dual-MAC and multi-function parts keep the
	 * second function's address at 0xcc (and beyond, on 5717+).
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM lock cannot be taken, reset the NVRAM state
		 * machine instead of unlocking.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b == "HK" signature left by bootcode when the mailbox holds
	 * a MAC address; the low two bytes of 'hi' are address octets 0-1.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* Reads are big-endian in memory: octets 0-1 are the
			 * last two bytes of 'hi', octets 2-5 all of 'lo'.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* Fall back to the machine IDPROM address on SPARC. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
15404
/* Boundary "goal" encodings chosen per host architecture below. */
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

/* tg3_calc_dma_bndry() - fold DMA burst-boundary bits into a DMA_RWCTRL value.
 * @tp:  device state
 * @val: partially-built DMA_RWCTRL register value
 *
 * Selects read/write DMA burst boundaries based on the host's PCI cache
 * line size and bus type (PCI, PCI-X, PCI Express) and merges the
 * corresponding bits into @val.  On 5703 and later non-PCIe chips the
 * boundary bits have no effect, so @val is returned unchanged.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI config cache line size is in 32-bit words (hence * 4);
	 * a value of 0 means "not set" and is treated as 1024 bytes.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Per-architecture preference: burst across multiple cache lines,
	 * stop at a single cache line, or no preference (0 = chip default).
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* 57765+ parts expose only a cache-alignment disable control;
	 * note this REPLACES val rather than ORing into it.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe only has write-side boundary controls. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI: pick the boundary matching the cache line size
		 * for SINGLE_CACHELINE, otherwise fall through to the next
		 * larger encoding.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
15548
/* tg3_do_test_dma() - run a single test transfer through the chip's DMA engine.
 * @tp:        device state
 * @buf:       host test buffer (kernel virtual address)
 * @buf_dma:   DMA (bus) address of @buf
 * @size:      transfer length in bytes
 * @to_device: nonzero = host-to-chip (read DMA), zero = chip-to-host (write DMA)
 *
 * Builds an internal buffer descriptor, writes it into NIC SRAM through
 * the PCI memory window, kicks the appropriate DMA FIFO, and polls the
 * matching completion FIFO.  Returns 0 when the descriptor completes,
 * -ENODEV if it does not complete within 40 polls of 100us each (~4ms).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear both completion FIFOs and both DMA status registers,
	 * then quiesce the buffer manager and reset the FTQs.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor: host buffer on one side, NIC SRAM mbuf 0x2100
	 * on the other.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Read DMA: completion queue 13, send queue 2. */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Write DMA: completion queue 16, send queue 7. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window, then close the window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor address to start the transfer. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO until our descriptor shows up. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
15628
/* Size of the host buffer DMA'd to and from the chip by tg3_test_dma(). */
#define TEST_BUFFER_SIZE	0x2000

/* Host bridges known to expose the 5700/5701 write DMA bug even though
 * the DMA test passes; tg3_test_dma() forces the 16-byte write boundary
 * when one of these is present.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
15635
/* tg3_test_dma() - configure tp->dma_rwctrl and validate it by real DMA.
 * @tp: device state
 *
 * Builds the DMA_RWCTRL value (PCI command codes, burst boundaries and
 * per-chip watermark/workaround bits), writes it to the chip, and on
 * 5700/5701 parts runs a write/read-back DMA loop over a coherent test
 * buffer, tightening the write boundary to 16 bytes if corruption is
 * observed.  Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV on unrecoverable DMA failure/corruption.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base value: PCI write command 0x7, read command 0x6. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765+ chips skip all the watermark/workaround tuning below. */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Conventional PCI: per-chip watermark values. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble regardless of bus mode. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual DMA loop-back test below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Fill the buffer with a known pattern, DMA it to the chip,
	 * DMA it back, and compare.  On the first corruption, retry
	 * once with the 16-byte write boundary; corruption with the
	 * 16-byte boundary already in effect is fatal.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15825
Linus Torvalds1da177e2005-04-16 15:20:36 -070015826static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15827{
Joe Perches63c3a662011-04-26 08:12:10 +000015828 if (tg3_flag(tp, 57765_PLUS)) {
Matt Carlson666bc832010-01-20 16:58:03 +000015829 tp->bufmgr_config.mbuf_read_dma_low_water =
15830 DEFAULT_MB_RDMA_LOW_WATER_5705;
15831 tp->bufmgr_config.mbuf_mac_rx_low_water =
15832 DEFAULT_MB_MACRX_LOW_WATER_57765;
15833 tp->bufmgr_config.mbuf_high_water =
15834 DEFAULT_MB_HIGH_WATER_57765;
15835
15836 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15837 DEFAULT_MB_RDMA_LOW_WATER_5705;
15838 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15839 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15840 tp->bufmgr_config.mbuf_high_water_jumbo =
15841 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
Joe Perches63c3a662011-04-26 08:12:10 +000015842 } else if (tg3_flag(tp, 5705_PLUS)) {
Michael Chanfdfec1722005-07-25 12:31:48 -070015843 tp->bufmgr_config.mbuf_read_dma_low_water =
15844 DEFAULT_MB_RDMA_LOW_WATER_5705;
15845 tp->bufmgr_config.mbuf_mac_rx_low_water =
15846 DEFAULT_MB_MACRX_LOW_WATER_5705;
15847 tp->bufmgr_config.mbuf_high_water =
15848 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070015849 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15850 tp->bufmgr_config.mbuf_mac_rx_low_water =
15851 DEFAULT_MB_MACRX_LOW_WATER_5906;
15852 tp->bufmgr_config.mbuf_high_water =
15853 DEFAULT_MB_HIGH_WATER_5906;
15854 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015855
Michael Chanfdfec1722005-07-25 12:31:48 -070015856 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15857 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15858 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15859 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15860 tp->bufmgr_config.mbuf_high_water_jumbo =
15861 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15862 } else {
15863 tp->bufmgr_config.mbuf_read_dma_low_water =
15864 DEFAULT_MB_RDMA_LOW_WATER;
15865 tp->bufmgr_config.mbuf_mac_rx_low_water =
15866 DEFAULT_MB_MACRX_LOW_WATER;
15867 tp->bufmgr_config.mbuf_high_water =
15868 DEFAULT_MB_HIGH_WATER;
15869
15870 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15871 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15872 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15873 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15874 tp->bufmgr_config.mbuf_high_water_jumbo =
15875 DEFAULT_MB_HIGH_WATER_JUMBO;
15876 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015877
15878 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15879 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15880}
15881
15882static char * __devinit tg3_phy_string(struct tg3 *tp)
15883{
Matt Carlson79eb6902010-02-17 15:17:03 +000015884 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15885 case TG3_PHY_ID_BCM5400: return "5400";
15886 case TG3_PHY_ID_BCM5401: return "5401";
15887 case TG3_PHY_ID_BCM5411: return "5411";
15888 case TG3_PHY_ID_BCM5701: return "5701";
15889 case TG3_PHY_ID_BCM5703: return "5703";
15890 case TG3_PHY_ID_BCM5704: return "5704";
15891 case TG3_PHY_ID_BCM5705: return "5705";
15892 case TG3_PHY_ID_BCM5750: return "5750";
15893 case TG3_PHY_ID_BCM5752: return "5752";
15894 case TG3_PHY_ID_BCM5714: return "5714";
15895 case TG3_PHY_ID_BCM5780: return "5780";
15896 case TG3_PHY_ID_BCM5755: return "5755";
15897 case TG3_PHY_ID_BCM5787: return "5787";
15898 case TG3_PHY_ID_BCM5784: return "5784";
15899 case TG3_PHY_ID_BCM5756: return "5722/5756";
15900 case TG3_PHY_ID_BCM5906: return "5906";
15901 case TG3_PHY_ID_BCM5761: return "5761";
15902 case TG3_PHY_ID_BCM5718C: return "5718C";
15903 case TG3_PHY_ID_BCM5718S: return "5718S";
15904 case TG3_PHY_ID_BCM57765: return "57765";
Matt Carlson302b5002010-06-05 17:24:38 +000015905 case TG3_PHY_ID_BCM5719C: return "5719C";
Matt Carlson6418f2c2011-04-05 14:22:49 +000015906 case TG3_PHY_ID_BCM5720C: return "5720C";
Matt Carlson79eb6902010-02-17 15:17:03 +000015907 case TG3_PHY_ID_BCM8002: return "8002/serdes";
Linus Torvalds1da177e2005-04-16 15:20:36 -070015908 case 0: return "serdes";
15909 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070015910 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015911}
15912
Michael Chanf9804dd2005-09-27 12:13:10 -070015913static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15914{
Joe Perches63c3a662011-04-26 08:12:10 +000015915 if (tg3_flag(tp, PCI_EXPRESS)) {
Michael Chanf9804dd2005-09-27 12:13:10 -070015916 strcpy(str, "PCI Express");
15917 return str;
Joe Perches63c3a662011-04-26 08:12:10 +000015918 } else if (tg3_flag(tp, PCIX_MODE)) {
Michael Chanf9804dd2005-09-27 12:13:10 -070015919 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15920
15921 strcpy(str, "PCIX:");
15922
15923 if ((clock_ctrl == 7) ||
15924 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15925 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15926 strcat(str, "133MHz");
15927 else if (clock_ctrl == 0)
15928 strcat(str, "33MHz");
15929 else if (clock_ctrl == 2)
15930 strcat(str, "50MHz");
15931 else if (clock_ctrl == 4)
15932 strcat(str, "66MHz");
15933 else if (clock_ctrl == 6)
15934 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070015935 } else {
15936 strcpy(str, "PCI:");
Joe Perches63c3a662011-04-26 08:12:10 +000015937 if (tg3_flag(tp, PCI_HIGH_SPEED))
Michael Chanf9804dd2005-09-27 12:13:10 -070015938 strcat(str, "66MHz");
15939 else
15940 strcat(str, "33MHz");
15941 }
Joe Perches63c3a662011-04-26 08:12:10 +000015942 if (tg3_flag(tp, PCI_32BIT))
Michael Chanf9804dd2005-09-27 12:13:10 -070015943 strcat(str, ":32-bit");
15944 else
15945 strcat(str, ":64-bit");
15946 return str;
15947}
15948
David S. Miller15f98502005-05-18 22:49:26 -070015949static void __devinit tg3_init_coal(struct tg3 *tp)
15950{
15951 struct ethtool_coalesce *ec = &tp->coal;
15952
15953 memset(ec, 0, sizeof(*ec));
15954 ec->cmd = ETHTOOL_GCOALESCE;
15955 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15956 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15957 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15958 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15959 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15960 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15961 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15962 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15963 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15964
15965 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15966 HOSTCC_MODE_CLRTICK_TXBD)) {
15967 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15968 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15969 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15970 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15971 }
Michael Chand244c892005-07-05 14:42:33 -070015972
Joe Perches63c3a662011-04-26 08:12:10 +000015973 if (tg3_flag(tp, 5705_PLUS)) {
Michael Chand244c892005-07-05 14:42:33 -070015974 ec->rx_coalesce_usecs_irq = 0;
15975 ec->tx_coalesce_usecs_irq = 0;
15976 ec->stats_block_coalesce_usecs = 0;
15977 }
David S. Miller15f98502005-05-18 22:49:26 -070015978}
15979
Linus Torvalds1da177e2005-04-16 15:20:36 -070015980static int __devinit tg3_init_one(struct pci_dev *pdev,
15981 const struct pci_device_id *ent)
15982{
Linus Torvalds1da177e2005-04-16 15:20:36 -070015983 struct net_device *dev;
15984 struct tg3 *tp;
Matt Carlson646c9ed2009-09-01 12:58:41 +000015985 int i, err, pm_cap;
15986 u32 sndmbx, rcvmbx, intmbx;
Michael Chanf9804dd2005-09-27 12:13:10 -070015987 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080015988 u64 dma_mask, persist_dma_mask;
Michał Mirosławc8f44af2011-11-15 15:29:55 +000015989 netdev_features_t features = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015990
Joe Perches05dbe002010-02-17 19:44:19 +000015991 printk_once(KERN_INFO "%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015992
15993 err = pci_enable_device(pdev);
15994 if (err) {
Matt Carlson2445e462010-04-05 10:19:21 +000015995 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070015996 return err;
15997 }
15998
Linus Torvalds1da177e2005-04-16 15:20:36 -070015999 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16000 if (err) {
Matt Carlson2445e462010-04-05 10:19:21 +000016001 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070016002 goto err_out_disable_pdev;
16003 }
16004
16005 pci_set_master(pdev);
16006
16007 /* Find power-management capability. */
16008 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16009 if (pm_cap == 0) {
Matt Carlson2445e462010-04-05 10:19:21 +000016010 dev_err(&pdev->dev,
16011 "Cannot find Power Management capability, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070016012 err = -EIO;
16013 goto err_out_free_res;
16014 }
16015
Matt Carlson16821282011-07-13 09:27:28 +000016016 err = pci_set_power_state(pdev, PCI_D0);
16017 if (err) {
16018 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16019 goto err_out_free_res;
16020 }
16021
Matt Carlsonfe5f5782009-09-01 13:09:39 +000016022 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016023 if (!dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070016024 err = -ENOMEM;
Matt Carlson16821282011-07-13 09:27:28 +000016025 goto err_out_power_down;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016026 }
16027
Linus Torvalds1da177e2005-04-16 15:20:36 -070016028 SET_NETDEV_DEV(dev, &pdev->dev);
16029
Linus Torvalds1da177e2005-04-16 15:20:36 -070016030 tp = netdev_priv(dev);
16031 tp->pdev = pdev;
16032 tp->dev = dev;
16033 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016034 tp->rx_mode = TG3_DEF_RX_MODE;
16035 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070016036
Linus Torvalds1da177e2005-04-16 15:20:36 -070016037 if (tg3_debug > 0)
16038 tp->msg_enable = tg3_debug;
16039 else
16040 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16041
16042 /* The word/byte swap controls here control register access byte
16043 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16044 * setting below.
16045 */
16046 tp->misc_host_ctrl =
16047 MISC_HOST_CTRL_MASK_PCI_INT |
16048 MISC_HOST_CTRL_WORD_SWAP |
16049 MISC_HOST_CTRL_INDIR_ACCESS |
16050 MISC_HOST_CTRL_PCISTATE_RW;
16051
16052 /* The NONFRM (non-frame) byte/word swap controls take effect
16053 * on descriptor entries, anything which isn't packet data.
16054 *
16055 * The StrongARM chips on the board (one for tx, one for rx)
16056 * are running in big-endian mode.
16057 */
16058 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16059 GRC_MODE_WSWAP_NONFRM_DATA);
16060#ifdef __BIG_ENDIAN
16061 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16062#endif
16063 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016064 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000016065 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016066
Matt Carlsond5fe4882008-11-21 17:20:32 -080016067 tp->regs = pci_ioremap_bar(pdev, BAR_0);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010016068 if (!tp->regs) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016069 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070016070 err = -ENOMEM;
16071 goto err_out_free_dev;
16072 }
16073
Matt Carlsonc9cab242011-07-13 09:27:27 +000016074 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16075 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16076 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16077 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16078 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
Michael Chan79d49692012-11-05 14:26:29 +000016079 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
Matt Carlsonc9cab242011-07-13 09:27:27 +000016080 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16081 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16082 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
16083 tg3_flag_set(tp, ENABLE_APE);
16084 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16085 if (!tp->aperegs) {
16086 dev_err(&pdev->dev,
16087 "Cannot map APE registers, aborting\n");
16088 err = -ENOMEM;
16089 goto err_out_iounmap;
16090 }
16091 }
16092
Linus Torvalds1da177e2005-04-16 15:20:36 -070016093 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16094 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016095
Linus Torvalds1da177e2005-04-16 15:20:36 -070016096 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016097 dev->watchdog_timeo = TG3_TX_TIMEOUT;
Matt Carlson2ffcc982011-05-19 12:12:44 +000016098 dev->netdev_ops = &tg3_netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016099 dev->irq = pdev->irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016100
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000016101 err = tg3_get_invariants(tp, ent);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016102 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016103 dev_err(&pdev->dev,
16104 "Problem fetching invariants of chip, aborting\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016105 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016106 }
16107
Michael Chan4a29cc22006-03-19 13:21:12 -080016108 /* The EPB bridge inside 5714, 5715, and 5780 and any
16109 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080016110 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16111 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16112 * do DMA address check in tg3_start_xmit().
16113 */
Joe Perches63c3a662011-04-26 08:12:10 +000016114 if (tg3_flag(tp, IS_5788))
Yang Hongyang284901a2009-04-06 19:01:15 -070016115 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
Joe Perches63c3a662011-04-26 08:12:10 +000016116 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
Yang Hongyang50cf1562009-04-06 19:01:14 -070016117 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
Michael Chan72f2afb2006-03-06 19:28:35 -080016118#ifdef CONFIG_HIGHMEM
Yang Hongyang6a355282009-04-06 19:01:13 -070016119 dma_mask = DMA_BIT_MASK(64);
Michael Chan72f2afb2006-03-06 19:28:35 -080016120#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080016121 } else
Yang Hongyang6a355282009-04-06 19:01:13 -070016122 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
Michael Chan72f2afb2006-03-06 19:28:35 -080016123
16124 /* Configure DMA attributes. */
Yang Hongyang284901a2009-04-06 19:01:15 -070016125 if (dma_mask > DMA_BIT_MASK(32)) {
Michael Chan72f2afb2006-03-06 19:28:35 -080016126 err = pci_set_dma_mask(pdev, dma_mask);
16127 if (!err) {
Matt Carlson0da06062011-05-19 12:12:53 +000016128 features |= NETIF_F_HIGHDMA;
Michael Chan72f2afb2006-03-06 19:28:35 -080016129 err = pci_set_consistent_dma_mask(pdev,
16130 persist_dma_mask);
16131 if (err < 0) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016132 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16133 "DMA for consistent allocations\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016134 goto err_out_apeunmap;
Michael Chan72f2afb2006-03-06 19:28:35 -080016135 }
16136 }
16137 }
Yang Hongyang284901a2009-04-06 19:01:15 -070016138 if (err || dma_mask == DMA_BIT_MASK(32)) {
16139 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Michael Chan72f2afb2006-03-06 19:28:35 -080016140 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016141 dev_err(&pdev->dev,
16142 "No usable DMA configuration, aborting\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016143 goto err_out_apeunmap;
Michael Chan72f2afb2006-03-06 19:28:35 -080016144 }
16145 }
16146
Michael Chanfdfec1722005-07-25 12:31:48 -070016147 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016148
Matt Carlson0da06062011-05-19 12:12:53 +000016149 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16150
16151 /* 5700 B0 chips do not support checksumming correctly due
16152 * to hardware bugs.
16153 */
16154 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16155 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16156
16157 if (tg3_flag(tp, 5755_PLUS))
16158 features |= NETIF_F_IPV6_CSUM;
16159 }
16160
Michael Chan4e3a7aa2006-03-20 17:47:44 -080016161 /* TSO is on by default on chips that support hardware TSO.
16162 * Firmware TSO on older chips gives lower performance, so it
16163 * is off by default, but can be enabled using ethtool.
16164 */
Joe Perches63c3a662011-04-26 08:12:10 +000016165 if ((tg3_flag(tp, HW_TSO_1) ||
16166 tg3_flag(tp, HW_TSO_2) ||
16167 tg3_flag(tp, HW_TSO_3)) &&
Matt Carlson0da06062011-05-19 12:12:53 +000016168 (features & NETIF_F_IP_CSUM))
16169 features |= NETIF_F_TSO;
Joe Perches63c3a662011-04-26 08:12:10 +000016170 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
Matt Carlson0da06062011-05-19 12:12:53 +000016171 if (features & NETIF_F_IPV6_CSUM)
16172 features |= NETIF_F_TSO6;
Joe Perches63c3a662011-04-26 08:12:10 +000016173 if (tg3_flag(tp, HW_TSO_3) ||
Matt Carlsone849cdc2009-11-13 13:03:38 +000016174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070016175 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16176 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
Joe Perches63c3a662011-04-26 08:12:10 +000016177 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michał Mirosławdc668912011-04-07 03:35:07 +000016178 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
Matt Carlson0da06062011-05-19 12:12:53 +000016179 features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070016180 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016181
Matt Carlsond542fe22011-05-19 16:02:43 +000016182 dev->features |= features;
16183 dev->vlan_features |= features;
16184
Mahesh Bandewar06c03c02011-05-08 06:51:48 +000016185 /*
16186 * Add loopback capability only for a subset of devices that support
16187 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
16188 * loopback for the remaining devices.
16189 */
16190 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16191 !tg3_flag(tp, CPMU_PRESENT))
16192 /* Add the loopback capability */
Matt Carlson0da06062011-05-19 12:12:53 +000016193 features |= NETIF_F_LOOPBACK;
16194
Matt Carlson0da06062011-05-19 12:12:53 +000016195 dev->hw_features |= features;
Mahesh Bandewar06c03c02011-05-08 06:51:48 +000016196
Linus Torvalds1da177e2005-04-16 15:20:36 -070016197 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
Joe Perches63c3a662011-04-26 08:12:10 +000016198 !tg3_flag(tp, TSO_CAPABLE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070016199 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
Joe Perches63c3a662011-04-26 08:12:10 +000016200 tg3_flag_set(tp, MAX_RXPEND_64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016201 tp->rx_pending = 63;
16202 }
16203
Linus Torvalds1da177e2005-04-16 15:20:36 -070016204 err = tg3_get_device_address(tp);
16205 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016206 dev_err(&pdev->dev,
16207 "Could not obtain valid ethernet address, aborting\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016208 goto err_out_apeunmap;
Matt Carlson0d3031d2007-10-10 18:02:43 -070016209 }
16210
Matt Carlsonc88864d2007-11-12 21:07:01 -080016211 /*
16212 * Reset chip in case UNDI or EFI driver did not shutdown
16213 * DMA self test will enable WDMAC and we'll see (spurious)
16214 * pending DMA on the PCI bus at that point.
16215 */
16216 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16217 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16218 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16219 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16220 }
16221
16222 err = tg3_test_dma(tp);
16223 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016224 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
Matt Carlsonc88864d2007-11-12 21:07:01 -080016225 goto err_out_apeunmap;
16226 }
16227
Matt Carlson78f90dc2009-11-13 13:03:42 +000016228 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16229 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16230 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
Matt Carlson6fd45cb2010-09-15 08:59:57 +000016231 for (i = 0; i < tp->irq_max; i++) {
Matt Carlson78f90dc2009-11-13 13:03:42 +000016232 struct tg3_napi *tnapi = &tp->napi[i];
16233
16234 tnapi->tp = tp;
16235 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16236
16237 tnapi->int_mbox = intmbx;
Matt Carlson93a700a2011-08-31 11:44:54 +000016238 if (i <= 4)
Matt Carlson78f90dc2009-11-13 13:03:42 +000016239 intmbx += 0x8;
16240 else
16241 intmbx += 0x4;
16242
16243 tnapi->consmbox = rcvmbx;
16244 tnapi->prodmbox = sndmbx;
16245
Matt Carlson66cfd1b2010-09-30 10:34:30 +000016246 if (i)
Matt Carlson78f90dc2009-11-13 13:03:42 +000016247 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
Matt Carlson66cfd1b2010-09-30 10:34:30 +000016248 else
Matt Carlson78f90dc2009-11-13 13:03:42 +000016249 tnapi->coal_now = HOSTCC_MODE_NOW;
Matt Carlson78f90dc2009-11-13 13:03:42 +000016250
Joe Perches63c3a662011-04-26 08:12:10 +000016251 if (!tg3_flag(tp, SUPPORT_MSIX))
Matt Carlson78f90dc2009-11-13 13:03:42 +000016252 break;
16253
16254 /*
16255 * If we support MSIX, we'll be using RSS. If we're using
16256 * RSS, the first vector only handles link interrupts and the
16257 * remaining vectors handle rx and tx interrupts. Reuse the
16258 * mailbox values for the next iteration. The values we setup
16259 * above are still useful for the single vectored mode.
16260 */
16261 if (!i)
16262 continue;
16263
16264 rcvmbx += 0x8;
16265
16266 if (sndmbx & 0x4)
16267 sndmbx -= 0x4;
16268 else
16269 sndmbx += 0xc;
16270 }
16271
Matt Carlsonc88864d2007-11-12 21:07:01 -080016272 tg3_init_coal(tp);
16273
Michael Chanc49a1562006-12-17 17:07:29 -080016274 pci_set_drvdata(pdev, dev);
16275
Matt Carlsoncd0d7222011-07-13 09:27:33 +000016276 if (tg3_flag(tp, 5717_PLUS)) {
16277 /* Resume a low-power mode */
16278 tg3_frob_aux_power(tp, false);
16279 }
16280
Matt Carlson21f76382012-02-22 12:35:21 +000016281 tg3_timer_init(tp);
16282
Linus Torvalds1da177e2005-04-16 15:20:36 -070016283 err = register_netdev(dev);
16284 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016285 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070016286 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016287 }
16288
Joe Perches05dbe002010-02-17 19:44:19 +000016289 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16290 tp->board_part_number,
16291 tp->pci_chip_rev_id,
16292 tg3_bus_string(tp, str),
16293 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016294
Matt Carlsonf07e9af2010-08-02 11:26:07 +000016295 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
Matt Carlson3f0e3ad2009-11-02 14:24:36 +000016296 struct phy_device *phydev;
16297 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
Matt Carlson5129c3a2010-04-05 10:19:23 +000016298 netdev_info(dev,
16299 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
Joe Perches05dbe002010-02-17 19:44:19 +000016300 phydev->drv->name, dev_name(&phydev->dev));
Matt Carlsonf07e9af2010-08-02 11:26:07 +000016301 } else {
16302 char *ethtype;
16303
16304 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16305 ethtype = "10/100Base-TX";
16306 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16307 ethtype = "1000Base-SX";
16308 else
16309 ethtype = "10/100/1000Base-T";
16310
Matt Carlson5129c3a2010-04-05 10:19:23 +000016311 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
Matt Carlson47007832011-04-20 07:57:43 +000016312 "(WireSpeed[%d], EEE[%d])\n",
16313 tg3_phy_string(tp), ethtype,
16314 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16315 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
Matt Carlsonf07e9af2010-08-02 11:26:07 +000016316 }
Matt Carlsondf59c942008-11-03 16:52:56 -080016317
Joe Perches05dbe002010-02-17 19:44:19 +000016318 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Michał Mirosławdc668912011-04-07 03:35:07 +000016319 (dev->features & NETIF_F_RXCSUM) != 0,
Joe Perches63c3a662011-04-26 08:12:10 +000016320 tg3_flag(tp, USE_LINKCHG_REG) != 0,
Matt Carlsonf07e9af2010-08-02 11:26:07 +000016321 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
Joe Perches63c3a662011-04-26 08:12:10 +000016322 tg3_flag(tp, ENABLE_ASF) != 0,
16323 tg3_flag(tp, TSO_CAPABLE) != 0);
Joe Perches05dbe002010-02-17 19:44:19 +000016324 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16325 tp->dma_rwctrl,
16326 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16327 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016328
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016329 pci_save_state(pdev);
16330
Linus Torvalds1da177e2005-04-16 15:20:36 -070016331 return 0;
16332
Matt Carlson0d3031d2007-10-10 18:02:43 -070016333err_out_apeunmap:
16334 if (tp->aperegs) {
16335 iounmap(tp->aperegs);
16336 tp->aperegs = NULL;
16337 }
16338
Linus Torvalds1da177e2005-04-16 15:20:36 -070016339err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070016340 if (tp->regs) {
16341 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070016342 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070016343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016344
16345err_out_free_dev:
16346 free_netdev(dev);
16347
Matt Carlson16821282011-07-13 09:27:28 +000016348err_out_power_down:
16349 pci_set_power_state(pdev, PCI_D3hot);
16350
Linus Torvalds1da177e2005-04-16 15:20:36 -070016351err_out_free_res:
16352 pci_release_regions(pdev);
16353
16354err_out_disable_pdev:
16355 pci_disable_device(pdev);
16356 pci_set_drvdata(pdev, NULL);
16357 return err;
16358}
16359
/* tg3_remove_one - undo tg3_init_one() for one device.
 *
 * Called by the PCI core when the device is removed (or the driver is
 * unloaded).  Teardown mirrors probe in reverse: stop deferred work and
 * the PHY/MDIO layer first, unregister the netdev, and only then unmap
 * the register windows and release the PCI resources.
 */
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		/* Make sure no reset task is still queued/running before
		 * the structures it touches go away.
		 */
		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		/* Unregister before unmapping: after this no net core
		 * callback can reach the (soon invalid) register mappings.
		 */
		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
16391
Eric Dumazetaa6027c2011-01-01 05:22:46 +000016392#ifdef CONFIG_PM_SLEEP
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000016393static int tg3_suspend(struct device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016394{
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000016395 struct pci_dev *pdev = to_pci_dev(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016396 struct net_device *dev = pci_get_drvdata(pdev);
16397 struct tg3 *tp = netdev_priv(dev);
16398 int err;
16399
16400 if (!netif_running(dev))
16401 return 0;
16402
Matt Carlsondb219972011-11-04 09:15:03 +000016403 tg3_reset_task_cancel(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070016404 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016405 tg3_netif_stop(tp);
16406
Matt Carlson21f76382012-02-22 12:35:21 +000016407 tg3_timer_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016408
David S. Millerf47c11e2005-06-24 20:18:35 -070016409 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016410 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070016411 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016412
16413 netif_device_detach(dev);
16414
David S. Millerf47c11e2005-06-24 20:18:35 -070016415 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070016416 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Joe Perches63c3a662011-04-26 08:12:10 +000016417 tg3_flag_clear(tp, INIT_COMPLETE);
David S. Millerf47c11e2005-06-24 20:18:35 -070016418 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016419
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000016420 err = tg3_power_down_prepare(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016421 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070016422 int err2;
16423
David S. Millerf47c11e2005-06-24 20:18:35 -070016424 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016425
Joe Perches63c3a662011-04-26 08:12:10 +000016426 tg3_flag_set(tp, INIT_COMPLETE);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070016427 err2 = tg3_restart_hw(tp, 1);
16428 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070016429 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016430
Matt Carlson21f76382012-02-22 12:35:21 +000016431 tg3_timer_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016432
16433 netif_device_attach(dev);
16434 tg3_netif_start(tp);
16435
Michael Chanb9ec6c12006-07-25 16:37:27 -070016436out:
David S. Millerf47c11e2005-06-24 20:18:35 -070016437 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070016438
16439 if (!err2)
16440 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016441 }
16442
16443 return err;
16444}
16445
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000016446static int tg3_resume(struct device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016447{
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000016448 struct pci_dev *pdev = to_pci_dev(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016449 struct net_device *dev = pci_get_drvdata(pdev);
16450 struct tg3 *tp = netdev_priv(dev);
16451 int err;
16452
16453 if (!netif_running(dev))
16454 return 0;
16455
Linus Torvalds1da177e2005-04-16 15:20:36 -070016456 netif_device_attach(dev);
16457
David S. Millerf47c11e2005-06-24 20:18:35 -070016458 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016459
Joe Perches63c3a662011-04-26 08:12:10 +000016460 tg3_flag_set(tp, INIT_COMPLETE);
Michael Chanb9ec6c12006-07-25 16:37:27 -070016461 err = tg3_restart_hw(tp, 1);
16462 if (err)
16463 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016464
Matt Carlson21f76382012-02-22 12:35:21 +000016465 tg3_timer_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016466
Linus Torvalds1da177e2005-04-16 15:20:36 -070016467 tg3_netif_start(tp);
16468
Michael Chanb9ec6c12006-07-25 16:37:27 -070016469out:
David S. Millerf47c11e2005-06-24 20:18:35 -070016470 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016471
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070016472 if (!err)
16473 tg3_phy_start(tp);
16474
Michael Chanb9ec6c12006-07-25 16:37:27 -070016475 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016476}
16477
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000016478static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
Eric Dumazetaa6027c2011-01-01 05:22:46 +000016479#define TG3_PM_OPS (&tg3_pm_ops)
16480
16481#else
16482
16483#define TG3_PM_OPS NULL
16484
16485#endif /* CONFIG_PM_SLEEP */
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000016486
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016487/**
16488 * tg3_io_error_detected - called when PCI error is detected
16489 * @pdev: Pointer to PCI device
16490 * @state: The current pci connection state
16491 *
16492 * This function is called after a PCI bus error affecting
16493 * this device has been detected.
16494 */
16495static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16496 pci_channel_state_t state)
16497{
16498 struct net_device *netdev = pci_get_drvdata(pdev);
16499 struct tg3 *tp = netdev_priv(netdev);
16500 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16501
16502 netdev_info(netdev, "PCI I/O error detected\n");
16503
16504 rtnl_lock();
16505
16506 if (!netif_running(netdev))
16507 goto done;
16508
16509 tg3_phy_stop(tp);
16510
16511 tg3_netif_stop(tp);
16512
Matt Carlson21f76382012-02-22 12:35:21 +000016513 tg3_timer_stop(tp);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016514
16515 /* Want to make sure that the reset task doesn't run */
Matt Carlsondb219972011-11-04 09:15:03 +000016516 tg3_reset_task_cancel(tp);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016517
16518 netif_device_detach(netdev);
16519
16520 /* Clean up software state, even if MMIO is blocked */
16521 tg3_full_lock(tp, 0);
16522 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16523 tg3_full_unlock(tp);
16524
16525done:
16526 if (state == pci_channel_io_perm_failure)
16527 err = PCI_ERS_RESULT_DISCONNECT;
16528 else
16529 pci_disable_device(pdev);
16530
16531 rtnl_unlock();
16532
16533 return err;
16534}
16535
16536/**
16537 * tg3_io_slot_reset - called after the pci bus has been reset.
16538 * @pdev: Pointer to PCI device
16539 *
16540 * Restart the card from scratch, as if from a cold-boot.
16541 * At this point, the card has exprienced a hard reset,
16542 * followed by fixups by BIOS, and has its config space
16543 * set up identically to what it was at cold boot.
16544 */
16545static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16546{
16547 struct net_device *netdev = pci_get_drvdata(pdev);
16548 struct tg3 *tp = netdev_priv(netdev);
16549 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16550 int err;
16551
16552 rtnl_lock();
16553
16554 if (pci_enable_device(pdev)) {
16555 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16556 goto done;
16557 }
16558
16559 pci_set_master(pdev);
16560 pci_restore_state(pdev);
16561 pci_save_state(pdev);
16562
16563 if (!netif_running(netdev)) {
16564 rc = PCI_ERS_RESULT_RECOVERED;
16565 goto done;
16566 }
16567
16568 err = tg3_power_up(tp);
Matt Carlsonbed98292011-07-13 09:27:29 +000016569 if (err)
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016570 goto done;
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016571
16572 rc = PCI_ERS_RESULT_RECOVERED;
16573
16574done:
16575 rtnl_unlock();
16576
16577 return rc;
16578}
16579
16580/**
16581 * tg3_io_resume - called when traffic can start flowing again.
16582 * @pdev: Pointer to PCI device
16583 *
16584 * This callback is called when the error recovery driver tells
16585 * us that its OK to resume normal operation.
16586 */
16587static void tg3_io_resume(struct pci_dev *pdev)
16588{
16589 struct net_device *netdev = pci_get_drvdata(pdev);
16590 struct tg3 *tp = netdev_priv(netdev);
16591 int err;
16592
16593 rtnl_lock();
16594
16595 if (!netif_running(netdev))
16596 goto done;
16597
16598 tg3_full_lock(tp, 0);
Joe Perches63c3a662011-04-26 08:12:10 +000016599 tg3_flag_set(tp, INIT_COMPLETE);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016600 err = tg3_restart_hw(tp, 1);
16601 tg3_full_unlock(tp);
16602 if (err) {
16603 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16604 goto done;
16605 }
16606
16607 netif_device_attach(netdev);
16608
Matt Carlson21f76382012-02-22 12:35:21 +000016609 tg3_timer_start(tp);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016610
16611 tg3_netif_start(tp);
16612
16613 tg3_phy_start(tp);
16614
16615done:
16616 rtnl_unlock();
16617}
16618
Stephen Hemminger3646f0e2012-09-07 09:33:15 -070016619static const struct pci_error_handlers tg3_err_handler = {
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016620 .error_detected = tg3_io_error_detected,
16621 .slot_reset = tg3_io_slot_reset,
16622 .resume = tg3_io_resume
16623};
16624
Linus Torvalds1da177e2005-04-16 15:20:36 -070016625static struct pci_driver tg3_driver = {
16626 .name = DRV_MODULE_NAME,
16627 .id_table = tg3_pci_tbl,
16628 .probe = tg3_init_one,
16629 .remove = __devexit_p(tg3_remove_one),
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016630 .err_handler = &tg3_err_handler,
Eric Dumazetaa6027c2011-01-01 05:22:46 +000016631 .driver.pm = TG3_PM_OPS,
Linus Torvalds1da177e2005-04-16 15:20:36 -070016632};
16633
16634static int __init tg3_init(void)
16635{
Jeff Garzik29917622006-08-19 17:48:59 -040016636 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016637}
16638
/* Module exit point: unregister the driver; the PCI core calls
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
16643
16644module_init(tg3_init);
16645module_exit(tg3_cleanup);