blob: 6fbb5486163d73aa6f73dfdb032f8b3694557083 [file] [log] [blame]
Michael Chanb6016b72005-05-26 13:03:09 -07001/* bnx2.c: Broadcom NX2 network driver.
2 *
Michael Chan206cc832006-01-23 16:14:05 -08003 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
Michael Chanb6016b72005-05-26 13:03:09 -07004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12#include "bnx2.h"
13#include "bnx2_fw.h"
14
/* Driver identification used in the banner, log prefix and ethtool info. */
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4.31"
#define DRV_MODULE_RELDATE	"January 19, 2006"

/* Convert a relative timeout in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
37
/* Board identifiers; used both as the index into board_info[] below and
 * as the driver_data value in bnx2_pci_tbl.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;
47
48/* indexed by board_t, above */
/* Printable board names; indexed by board_t, above. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
60
/* PCI IDs this driver binds to.  HP OEM variants are matched first by
 * subsystem vendor/device so they get their NC370x names; the generic
 * PCI_ANY_ID entries catch the remaining Broadcom boards.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
78
/* Supported NVRAM/flash parts, selected at probe time by matching the
 * chip's flash strapping value against the first field of each entry.
 * NOTE(review): field order appears to be strapping, config1..config3,
 * write1, buffered flag, page bits, page size, byte-address mask, total
 * size, name -- confirm against struct flash_spec in bnx2.h.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
165
166MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
167
Michael Chane89bbf12005-08-25 15:36:58 -0700168static inline u32 bnx2_tx_avail(struct bnx2 *bp)
169{
170 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
171
172 if (diff > MAX_TX_DESC_CNT)
173 diff = (diff & MAX_TX_DESC_CNT) - 1;
174 return (bp->tx_ring_size - diff);
175}
176
/* Read an indirect chip register: select the target offset through the
 * PCICFG register window, then read the value back through the window.
 * NOTE(review): no locking is visible here; presumably callers serialize
 * access to the shared window registers -- confirm.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
183
/* Write an indirect chip register through the PCICFG register window.
 * The address write must precede the data write; see bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
190
/* Write one 32-bit word into on-chip context memory at cid_addr + offset,
 * using the CTX_DATA_ADR/CTX_DATA register pair.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
}
198
/* Read PHY register @reg over MDIO into *val.
 * Returns 0 on success or -EBUSY if the MDIO command never completes,
 * in which case *val is zeroed.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	/* Hardware auto-polling owns the MDIO interface; turn it off
	 * before driving the bus by hand.  The read-back flushes the
	 * posted write before the settling delay.
	 */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the MDIO read command for (phy_addr, reg). */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the BUSY bit to clear, then re-read
	 * after a short delay to pick up the returned data bits.
	 */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	/* Still busy after the timeout: report failure with *val zeroed. */
	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore hardware auto-polling if it was enabled on entry. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
255
/* Write @val to PHY register @reg over MDIO.
 * Returns 0 on success or -EBUSY if the MDIO command never completes.
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * bus access and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	/* Suspend hardware auto-polling; read-back flushes the posted write. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the MDIO write command carrying the data in the low bits. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the BUSY bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore hardware auto-polling if it was enabled on entry. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
304
/* Mask chip interrupts.  The read-back flushes the posted PCI write so
 * the mask is guaranteed to have reached the device before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
312
/* Unmask chip interrupts and ack up to the last seen status index.
 * Two writes are issued: first with MASK_INT still set, then without,
 * both carrying last_status_idx.  Finally COAL_NOW forces the host
 * coalescing block to generate an interrupt if events are pending.
 * NOTE(review): the two-step ack sequence presumably works around a
 * chip interrupt-ordering quirk -- confirm against Broadcom errata.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
}
328
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR sees the disable in progress;
 * it is decremented again by bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
336
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue.  trans_start is refreshed so the tx
 * watchdog does not fire while the queue is deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
347
/* Undo bnx2_netif_stop().  Interrupts are only re-enabled when the
 * intr_sem nesting count drops back to zero, so paired stop/start
 * calls can nest safely.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
359
/* Release all DMA-coherent blocks and ring bookkeeping allocated by
 * bnx2_alloc_mem().  Safe to call on a partially allocated state: every
 * pointer is checked (or kfree(NULL) is a no-op) and NULLed afterwards,
 * which is what lets bnx2_alloc_mem() use it as its error path.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	if (bp->stats_blk) {
		pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
				    bp->stats_blk, bp->stats_blk_mapping);
		bp->stats_blk = NULL;
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, sizeof(struct status_block),
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	if (bp->rx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct rx_bd) * RX_DESC_CNT,
				    bp->rx_desc_ring, bp->rx_desc_mapping);
		bp->rx_desc_ring = NULL;
	}
	kfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
390
/* Allocate the tx/rx software rings (kmalloc) and the tx/rx descriptor
 * rings, status block and statistics block (DMA-coherent).
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is torn down via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
					        sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT);
	bp->rx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct rx_bd) *
						RX_DESC_CNT,
						&bp->rx_desc_mapping);
	if (bp->rx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->status_blk = pci_alloc_consistent(bp->pdev,
					      sizeof(struct status_block),
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, sizeof(struct status_block));

	bp->stats_blk = pci_alloc_consistent(bp->pdev,
					     sizeof(struct statistics_block),
					     &bp->stats_blk_mapping);
	if (bp->stats_blk == NULL)
		goto alloc_mem_err;

	memset(bp->stats_blk, 0, sizeof(struct statistics_block));

	return 0;

alloc_mem_err:
	/* bnx2_free_mem() tolerates partially populated state. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
442
/* Publish the current link state (speed/duplex/autoneg status) to the
 * bootcode through the shared memory LINK_STATUS word so firmware and
 * management agents see the same link information as the driver.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed + duplex into the firmware status word. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR latches link-down; read twice to get the
			 * current value.
			 */
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
498
/* Log the link state change and update the carrier flag.  The printk
 * fragments deliberately build a single line ("<n> Mbps <duplex>,
 * <flow-ctrl> flow control ON"), so their order matters.  The state is
 * also forwarded to firmware via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
533
/* Resolve the effective flow control (bp->flow_ctrl) after a link event.
 * If pause autoneg is not in effect, the requested setting is applied
 * directly (full duplex only).  Otherwise the local and remote pause
 * advertisements are combined per IEEE 802.3 Table 28B-3, with a
 * 5708 SerDes shortcut that reads the resolved result from the PHY.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Pause autoneg requires both SPEED and FLOW_CTRL autoneg bits. */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only defined for full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: the PHY reports the already-resolved pause state. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	/* Map 1000Base-X pause bits onto the copper encoding so one
	 * resolution table below handles both media types.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
609
610static int
Michael Chan5b0c76a2005-11-04 08:45:49 -0800611bnx2_5708s_linkup(struct bnx2 *bp)
612{
613 u32 val;
614
615 bp->link_up = 1;
616 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
617 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
618 case BCM5708S_1000X_STAT1_SPEED_10:
619 bp->line_speed = SPEED_10;
620 break;
621 case BCM5708S_1000X_STAT1_SPEED_100:
622 bp->line_speed = SPEED_100;
623 break;
624 case BCM5708S_1000X_STAT1_SPEED_1G:
625 bp->line_speed = SPEED_1000;
626 break;
627 case BCM5708S_1000X_STAT1_SPEED_2G5:
628 bp->line_speed = SPEED_2500;
629 break;
630 }
631 if (val & BCM5708S_1000X_STAT1_FD)
632 bp->duplex = DUPLEX_FULL;
633 else
634 bp->duplex = DUPLEX_HALF;
635
636 return 0;
637}
638
/* Record link-up state for the 5706 SerDes PHY.  Line speed is fixed at
 * 1000 Mbps; duplex comes from BMCR for forced mode, or from the common
 * subset of local/remote 1000X advertisements when autoneg is enabled.
 * Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR duplex stands as-is. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		/* Full duplex wins if both sides advertise it. */
		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
675
/* Record link-up speed/duplex for a copper PHY.  With autoneg enabled,
 * the highest common capability is chosen in 1000 -> 100 -> 10 order
 * (full duplex preferred at each speed); no common capability clears
 * link_up.  Without autoneg, speed/duplex come from BMCR.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Gigabit first: STAT1000 LP bits align with CTRL1000
		 * advertisement bits after a >>2 shift.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* Fall back to 10/100 advertisement registers. */
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No overlap with the link partner. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
741
/* Program the EMAC to match the resolved link parameters: inter-packet
 * gap, port mode (MII/GMII/2.5G), duplex and rx/tx pause enables.
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default IPG; a larger value is needed for 1000HD links. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		/* The fallthroughs below are deliberate: 10 Mbps shares
		 * the MII setting with 100 on non-5708 chips, and 2.5G
		 * adds the 25G bit on top of the GMII port mode.
		 */
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) == CHIP_NUM_5708) {
				val |= BNX2_EMAC_MODE_PORT_MII_10;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
808
/* Re-evaluate link state after a link event: read the PHY, update
 * speed/duplex/flow-control, report changes, and reprogram the MAC.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* MAC loopback mode is always considered "up". */
	if (bp->loopback == MAC_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR latches link-down; read twice to get the current state. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* 5706 SerDes: trust the EMAC link status over the PHY's BMSR. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: re-enable SerDes autoneg if it was forced
		 * off, and clear the parallel-detect state.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
874
/* Soft-reset the PHY through BMCR and wait for the self-clearing RESET
 * bit to drop.  Returns 0 on success, -EBUSY if the PHY never comes out
 * of reset within PHY_RESET_MAX_WAIT polls.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
898
899static u32
900bnx2_phy_get_pause_adv(struct bnx2 *bp)
901{
902 u32 adv = 0;
903
904 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
905 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
906
907 if (bp->phy_flags & PHY_SERDES_FLAG) {
908 adv = ADVERTISE_1000XPAUSE;
909 }
910 else {
911 adv = ADVERTISE_PAUSE_CAP;
912 }
913 }
914 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
915 if (bp->phy_flags & PHY_SERDES_FLAG) {
916 adv = ADVERTISE_1000XPSE_ASYM;
917 }
918 else {
919 adv = ADVERTISE_PAUSE_ASYM;
920 }
921 }
922 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
923 if (bp->phy_flags & PHY_SERDES_FLAG) {
924 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
925 }
926 else {
927 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
928 }
929 }
930 return adv;
931}
932
/* Configure the SerDes PHY according to the requested settings.
 * Forced mode: disable 2.5G on 5708, set forced speed/duplex, and bounce
 * the link so the partner notices.  Autoneg mode: program the 1000X and
 * pause advertisements and restart autoneg, with a shortened timer on
 * 5706 to speed up parallel detection.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Forced mode cannot use 2.5G; dropping it requires a
		 * link bounce so the change takes effect.
		 */
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg: enable 2.5G capability where the PHY supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
1031
/* Advertisement masks: the ETHTOOL_* masks use the ethtool ADVERTISED_*
 * encoding, the PHY_* masks use the MII register ADVERTISE_* encoding.
 * (Line-continuation backslashes restored; the blame render stripped
 * them from the multi-line macros.)
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL |	\
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1044
/* Configure the copper PHY according to the requested settings.
 * Autoneg mode: program 10/100, 1000 and pause advertisements and
 * restart autoneg only if something changed.  Forced mode: write the
 * forced BMCR, bouncing the link via loopback if it was up so the
 * partner renegotiates.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits we manage when comparing old vs new. */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1139
1140static int
1141bnx2_setup_phy(struct bnx2 *bp)
1142{
1143 if (bp->loopback == MAC_LOOPBACK)
1144 return 0;
1145
1146 if (bp->phy_flags & PHY_SERDES_FLAG) {
1147 return (bnx2_setup_serdes_phy(bp));
1148 }
1149 else {
1150 return (bnx2_setup_copper_phy(bp));
1151 }
1152}
1153
/* One-time initialization of the 5708 SerDes PHY.
 *
 * Enables IEEE-compatible fiber mode with autodetect, parallel-detect,
 * and (when supported) 2.5G capability, then applies board-specific TX
 * amplitude tweaks: one for early chip revisions and one driven by the
 * TXCTL3 value from shared-memory hardware config on backplane boards.
 * Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Select the DIG3 register block and force IEEE register layout,
	 * then switch back to the default DIG block.
	 */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G in the UP1 next-page register if capable. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Non-zero TXCTL3 in the port hardware config means the board
	 * needs a custom TX control value (only applied on backplanes).
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1207
/* One-time initialization of the 5706 SerDes PHY.
 *
 * Clears the parallel-detect flag, applies a 5706-specific chip
 * workaround, and programs the PHY's extended packet-length setting to
 * match the current MTU via shadow registers 0x18/0x1c.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		/* chip-specific workaround; exact meaning of 0x300 is
		 * undocumented here (MISC_UNUSED0 register).
		 */
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1243
/* One-time initialization of the copper PHY.
 *
 * Applies a CRC-fix register sequence, programs the extended
 * packet-length setting to match the MTU, and enables
 * ethernet@wirespeed (link at lower speed on marginal cabling).
 * Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	/* NOTE(review): this test is always true since the flag was just
	 * set unconditionally above; kept as-is.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Magic shadow-register sequence for the CRC workaround. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1286
1287
/* Reset and initialize the PHY.
 *
 * Configures link-ready interrupt mode, resets the PHY, reads its ID,
 * then runs the chip-specific init routine (5706S/5708S for SerDes,
 * copper otherwise) and finally applies the current link settings via
 * bnx2_setup_phy().  Returns the init routine's status.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Only link-state changes generate EMAC attentions. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY ID from the two MII ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	/* NOTE(review): setup status is not folded into rc; only the
	 * init routine's status is returned.
	 */
	bnx2_setup_phy(bp);

	return rc;
}
1320
1321static int
1322bnx2_set_mac_loopback(struct bnx2 *bp)
1323{
1324 u32 mac_mode;
1325
1326 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1327 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1328 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1329 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1330 bp->link_up = 1;
1331 return 0;
1332}
1333
Michael Chanbc5a0692006-01-23 16:13:22 -08001334static int bnx2_test_link(struct bnx2 *);
1335
/* Put the PHY into loopback mode at forced 1000/full.
 *
 * Writes BMCR under phy_lock, polls briefly for the (loopback) link to
 * come up, then forces the MAC into GMII mode with loopback-related
 * mode bits cleared.  Returns the BMCR write status; 0 on success.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Give the link up to ~100us to report up; proceed regardless. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		udelay(10);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1365
/* Post a message to the bootcode firmware and wait for its ACK.
 *
 * The message carries an incrementing sequence number; the firmware
 * echoes it in the FW mailbox when done.  WAIT0 messages return
 * immediately after posting.  On timeout a FW_TIMEOUT message is posted
 * (suppressing the log line when @silent) and -EBUSY is returned;
 * a non-OK firmware status yields -EIO; success returns 0.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next sequence number. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require the firmware to finish. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1408
/* Zero-initialize the 96 on-chip connection contexts.
 *
 * On 5706 A0 some VCID-to-PCID mappings are remapped to work around a
 * chip erratum; other revisions map contexts one-to-one.  Each context
 * page is selected via the page-table registers, cleared, then selected
 * again.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* A0 erratum: remap VCIDs with bit 3 set. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1449
/* Work around bad RX buffer memory blocks.
 *
 * Drains the chip's internal mbuf allocator, remembering only the good
 * buffers (bit 9 clear), then frees the good ones back — permanently
 * leaking the bad blocks inside the chip so they are never used for
 * packets.  Returns 0, or -ENOMEM if the temporary array allocation
 * fails.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1500
1501static void
1502bnx2_set_mac_addr(struct bnx2 *bp)
1503{
1504 u32 val;
1505 u8 *mac_addr = bp->dev->dev_addr;
1506
1507 val = (mac_addr[0] << 8) | mac_addr[1];
1508
1509 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1510
1511 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1512 (mac_addr[4] << 8) | mac_addr[5];
1513
1514 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1515}
1516
/* Allocate and DMA-map a fresh skb for RX ring slot @index.
 *
 * The skb data pointer is aligned to 8 bytes, the mapping is recorded
 * in the software ring entry, and the hardware buffer descriptor gets
 * the 64-bit DMA address.  Also advances rx_prod_bseq by one buffer.
 * Returns 0, or -ENOMEM when skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[index];
	unsigned long align;

	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to an 8-byte boundary for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	skb->dev = bp->dev;
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the DMA address across the two 32-bit BD fields. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1549
1550static void
1551bnx2_phy_int(struct bnx2 *bp)
1552{
1553 u32 new_link_state, old_link_state;
1554
1555 new_link_state = bp->status_blk->status_attn_bits &
1556 STATUS_ATTN_BITS_LINK_STATE;
1557 old_link_state = bp->status_blk->status_attn_bits_ack &
1558 STATUS_ATTN_BITS_LINK_STATE;
1559 if (new_link_state != old_link_state) {
1560 if (new_link_state) {
1561 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1562 STATUS_ATTN_BITS_LINK_STATE);
1563 }
1564 else {
1565 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1566 STATUS_ATTN_BITS_LINK_STATE);
1567 }
1568 bnx2_set_link(bp);
1569 }
1570}
1571
/* Reclaim completed TX descriptors.
 *
 * Walks the software consumer index up to the hardware consumer index
 * from the status block, unmapping and freeing each transmitted skb
 * (head fragment plus page fragments).  With TSO, completion may stop
 * mid-packet, so partially-completed TSO packets are left for later.
 * Wakes the TX queue when enough descriptors have been freed.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The hardware never reports a consumer index on the last entry
	 * of a ring page; skip over it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->tso_size) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed distance check handles 16-bit wraparound. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb_irq(skb);

		/* Re-read the hardware index: more packets may have
		 * completed while we were unmapping.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;

	/* Wake the queue if it was stopped and space is available again;
	 * re-check under tx_lock to avoid racing with the xmit path.
	 */
	if (unlikely(netif_queue_stopped(bp->dev))) {
		spin_lock(&bp->tx_lock);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock(&bp->tx_lock);
	}
}
1654
1655static inline void
1656bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1657 u16 cons, u16 prod)
1658{
1659 struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons];
1660 struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod];
1661 struct rx_bd *cons_bd = &bp->rx_desc_ring[cons];
1662 struct rx_bd *prod_bd = &bp->rx_desc_ring[prod];
1663
1664 pci_dma_sync_single_for_device(bp->pdev,
1665 pci_unmap_addr(cons_rx_buf, mapping),
1666 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1667
1668 prod_rx_buf->skb = cons_rx_buf->skb;
1669 pci_unmap_addr_set(prod_rx_buf, mapping,
1670 pci_unmap_addr(cons_rx_buf, mapping));
1671
1672 memcpy(prod_bd, cons_bd, 8);
1673
1674 bp->rx_prod_bseq += bp->rx_buf_use_size;
1675
1676}
1677
/* Receive up to @budget packets from the RX ring (NAPI poll body).
 *
 * For each completed descriptor: drop frames with hardware-reported
 * errors, copy small frames into a fresh skb when running with jumbo
 * MTU, otherwise hand the full buffer up and refill the slot.  Frames
 * failing allocation are recycled in place.  Updates the chip's RX
 * consumer mailbox on exit and returns the number of packets received.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Hardware skips the last entry of each ring page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		/* Sync only the header area; that is all we inspect here. */
		pci_dma_sync_single_for_cpu(bp->pdev,
			pci_unmap_addr(rx_buf, mapping),
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr with status and length;
		 * the length includes the 4-byte CRC, which we strip.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement buffer posted; hand this one up. */
			pci_unmap_single(bp->pdev,
				pci_unmap_addr(rx_buf, mapping),
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement available: recycle and drop. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN-tagged. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(htons(skb->protocol) != 0x8100)) {

			dev_kfree_skb_irq(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when the frame parsed
		 * as TCP/UDP and no checksum errors were flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		rx_buf->skb = NULL;

		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1826
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further interrupts until the NAPI poll re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1849
/* INTx ISR: like bnx2_msi() but must first decide whether the
 * (possibly shared) interrupt is actually ours.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until the NAPI poll re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1879
Michael Chanf4e418f2005-11-04 08:53:48 -08001880static inline int
1881bnx2_has_work(struct bnx2 *bp)
1882{
1883 struct status_block *sblk = bp->status_blk;
1884
1885 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1886 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1887 return 1;
1888
1889 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1890 bp->link_up)
1891 return 1;
1892
1893 return 0;
1894}
1895
/* NAPI poll handler.
 *
 * Services link attentions, TX completions, and up to quota RX packets.
 * When all work is done, completes NAPI and re-enables interrupts; the
 * INTx path uses two ACK writes (mask then unmask) as required by the
 * hardware, while MSI needs only one.  Returns 1 while work remains,
 * 0 when polling is complete.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A mismatch between raw and acked attention bits means a
	 * link-state change is pending.
	 */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we have seen before re-checking for
	 * work, so a new interrupt fires if the block advances again.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first write keeps interrupts masked while the
		 * index is updated, second write unmasks them.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
1950
/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
 * from set_multicast.
 */
/* Program the EMAC receive filters from dev->flags and the multicast
 * list: promiscuous, all-multicast, or a 256-bit CRC hash filter.
 * VLAN tag stripping is disabled when a vlan group is registered or
 * when ASF management firmware needs to see tagged frames.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept every multicast: fill the hash with all ones. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one of 256 filter bits using the
		 * low byte of its CRC: 8 registers x 32 bits.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sort engine. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2024
2025static void
2026load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2027 u32 rv2p_proc)
2028{
2029 int i;
2030 u32 val;
2031
2032
2033 for (i = 0; i < rv2p_code_len; i += 8) {
2034 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2035 rv2p_code++;
2036 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2037 rv2p_code++;
2038
2039 if (rv2p_proc == RV2P_PROC1) {
2040 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2041 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2042 }
2043 else {
2044 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2045 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2046 }
2047 }
2048
2049 /* Reset the processor, un-stall is done later. */
2050 if (rv2p_proc == RV2P_PROC1) {
2051 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2052 }
2053 else {
2054 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2055 }
2056}
2057
2058static void
2059load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2060{
2061 u32 offset;
2062 u32 val;
2063
2064 /* Halt the CPU. */
2065 val = REG_RD_IND(bp, cpu_reg->mode);
2066 val |= cpu_reg->mode_value_halt;
2067 REG_WR_IND(bp, cpu_reg->mode, val);
2068 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2069
2070 /* Load the Text area. */
2071 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2072 if (fw->text) {
2073 int j;
2074
2075 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2076 REG_WR_IND(bp, offset, fw->text[j]);
2077 }
2078 }
2079
2080 /* Load the Data area. */
2081 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2082 if (fw->data) {
2083 int j;
2084
2085 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2086 REG_WR_IND(bp, offset, fw->data[j]);
2087 }
2088 }
2089
2090 /* Load the SBSS area. */
2091 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2092 if (fw->sbss) {
2093 int j;
2094
2095 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2096 REG_WR_IND(bp, offset, fw->sbss[j]);
2097 }
2098 }
2099
2100 /* Load the BSS area. */
2101 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2102 if (fw->bss) {
2103 int j;
2104
2105 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2106 REG_WR_IND(bp, offset, fw->bss[j]);
2107 }
2108 }
2109
2110 /* Load the Read-Only area. */
2111 offset = cpu_reg->spad_base +
2112 (fw->rodata_addr - cpu_reg->mips_view_base);
2113 if (fw->rodata) {
2114 int j;
2115
2116 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2117 REG_WR_IND(bp, offset, fw->rodata[j]);
2118 }
2119 }
2120
2121 /* Clear the pre-fetch instruction. */
2122 REG_WR_IND(bp, cpu_reg->inst, 0);
2123 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2124
2125 /* Start the CPU. */
2126 val = REG_RD_IND(bp, cpu_reg->mode);
2127 val &= ~cpu_reg->mode_value_halt;
2128 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2129 REG_WR_IND(bp, cpu_reg->mode, val);
2130}
2131
2132static void
2133bnx2_init_cpus(struct bnx2 *bp)
2134{
2135 struct cpu_reg cpu_reg;
2136 struct fw_info fw;
2137
2138 /* Initialize the RV2P processor. */
2139 load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
2140 load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
2141
2142 /* Initialize the RX Processor. */
2143 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2144 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2145 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2146 cpu_reg.state = BNX2_RXP_CPU_STATE;
2147 cpu_reg.state_value_clear = 0xffffff;
2148 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2149 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2150 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2151 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2152 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2153 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2154 cpu_reg.mips_view_base = 0x8000000;
2155
2156 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2157 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2158 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2159 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2160
2161 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2162 fw.text_len = bnx2_RXP_b06FwTextLen;
2163 fw.text_index = 0;
2164 fw.text = bnx2_RXP_b06FwText;
2165
2166 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2167 fw.data_len = bnx2_RXP_b06FwDataLen;
2168 fw.data_index = 0;
2169 fw.data = bnx2_RXP_b06FwData;
2170
2171 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2172 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2173 fw.sbss_index = 0;
2174 fw.sbss = bnx2_RXP_b06FwSbss;
2175
2176 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2177 fw.bss_len = bnx2_RXP_b06FwBssLen;
2178 fw.bss_index = 0;
2179 fw.bss = bnx2_RXP_b06FwBss;
2180
2181 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2182 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2183 fw.rodata_index = 0;
2184 fw.rodata = bnx2_RXP_b06FwRodata;
2185
2186 load_cpu_fw(bp, &cpu_reg, &fw);
2187
2188 /* Initialize the TX Processor. */
2189 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2190 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2191 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2192 cpu_reg.state = BNX2_TXP_CPU_STATE;
2193 cpu_reg.state_value_clear = 0xffffff;
2194 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2195 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2196 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2197 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2198 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2199 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2200 cpu_reg.mips_view_base = 0x8000000;
2201
2202 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2203 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2204 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2205 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2206
2207 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2208 fw.text_len = bnx2_TXP_b06FwTextLen;
2209 fw.text_index = 0;
2210 fw.text = bnx2_TXP_b06FwText;
2211
2212 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2213 fw.data_len = bnx2_TXP_b06FwDataLen;
2214 fw.data_index = 0;
2215 fw.data = bnx2_TXP_b06FwData;
2216
2217 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2218 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2219 fw.sbss_index = 0;
2220 fw.sbss = bnx2_TXP_b06FwSbss;
2221
2222 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2223 fw.bss_len = bnx2_TXP_b06FwBssLen;
2224 fw.bss_index = 0;
2225 fw.bss = bnx2_TXP_b06FwBss;
2226
2227 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2228 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2229 fw.rodata_index = 0;
2230 fw.rodata = bnx2_TXP_b06FwRodata;
2231
2232 load_cpu_fw(bp, &cpu_reg, &fw);
2233
2234 /* Initialize the TX Patch-up Processor. */
2235 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2236 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2237 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2238 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2239 cpu_reg.state_value_clear = 0xffffff;
2240 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2241 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2242 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2243 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2244 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2245 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2246 cpu_reg.mips_view_base = 0x8000000;
2247
2248 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2249 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2250 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2251 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2252
2253 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2254 fw.text_len = bnx2_TPAT_b06FwTextLen;
2255 fw.text_index = 0;
2256 fw.text = bnx2_TPAT_b06FwText;
2257
2258 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2259 fw.data_len = bnx2_TPAT_b06FwDataLen;
2260 fw.data_index = 0;
2261 fw.data = bnx2_TPAT_b06FwData;
2262
2263 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2264 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2265 fw.sbss_index = 0;
2266 fw.sbss = bnx2_TPAT_b06FwSbss;
2267
2268 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2269 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2270 fw.bss_index = 0;
2271 fw.bss = bnx2_TPAT_b06FwBss;
2272
2273 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2274 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2275 fw.rodata_index = 0;
2276 fw.rodata = bnx2_TPAT_b06FwRodata;
2277
2278 load_cpu_fw(bp, &cpu_reg, &fw);
2279
2280 /* Initialize the Completion Processor. */
2281 cpu_reg.mode = BNX2_COM_CPU_MODE;
2282 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2283 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2284 cpu_reg.state = BNX2_COM_CPU_STATE;
2285 cpu_reg.state_value_clear = 0xffffff;
2286 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2287 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2288 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2289 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2290 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2291 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2292 cpu_reg.mips_view_base = 0x8000000;
2293
2294 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2295 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2296 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2297 fw.start_addr = bnx2_COM_b06FwStartAddr;
2298
2299 fw.text_addr = bnx2_COM_b06FwTextAddr;
2300 fw.text_len = bnx2_COM_b06FwTextLen;
2301 fw.text_index = 0;
2302 fw.text = bnx2_COM_b06FwText;
2303
2304 fw.data_addr = bnx2_COM_b06FwDataAddr;
2305 fw.data_len = bnx2_COM_b06FwDataLen;
2306 fw.data_index = 0;
2307 fw.data = bnx2_COM_b06FwData;
2308
2309 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2310 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2311 fw.sbss_index = 0;
2312 fw.sbss = bnx2_COM_b06FwSbss;
2313
2314 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2315 fw.bss_len = bnx2_COM_b06FwBssLen;
2316 fw.bss_index = 0;
2317 fw.bss = bnx2_COM_b06FwBss;
2318
2319 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2320 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2321 fw.rodata_index = 0;
2322 fw.rodata = bnx2_COM_b06FwRodata;
2323
2324 load_cpu_fw(bp, &cpu_reg, &fw);
2325
2326}
2327
/* Move the chip between PCI power states.
 *
 * D0: clear PMCSR state/PME status, wait out the D3hot exit delay, and
 * restore the EMAC/RPM registers that the D3hot path changed for WoL.
 * D3hot: optionally arm Wake-on-LAN (force 10/100 link, accept
 * broadcast/multicast magic packets), notify the bootcode, then write
 * the PMCSR to enter D3hot.
 *
 * Returns 0 on success or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Force state to D0 and clear any latched PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the magic-packet receive mode set up for WoL. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate a 10/100 link for the
			 * low-power state, then restore the user's
			 * autoneg/advertising settings. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule: accept broadcast and multicast so
			 * wake packets are seen. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending (skipped when the
		 * board cannot do WoL at all). */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* NOTE(review): on 5706 A0/A1 the D3hot state is
			 * only entered when WoL is armed — presumably an
			 * early-silicon erratum; confirm against errata. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2454
2455static int
2456bnx2_acquire_nvram_lock(struct bnx2 *bp)
2457{
2458 u32 val;
2459 int j;
2460
2461 /* Request access to the flash interface. */
2462 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2463 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2464 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2465 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2466 break;
2467
2468 udelay(5);
2469 }
2470
2471 if (j >= NVRAM_TIMEOUT_COUNT)
2472 return -EBUSY;
2473
2474 return 0;
2475}
2476
2477static int
2478bnx2_release_nvram_lock(struct bnx2 *bp)
2479{
2480 int j;
2481 u32 val;
2482
2483 /* Relinquish nvram interface. */
2484 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2485
2486 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2487 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2488 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2489 break;
2490
2491 udelay(5);
2492 }
2493
2494 if (j >= NVRAM_TIMEOUT_COUNT)
2495 return -EBUSY;
2496
2497 return 0;
2498}
2499
2500
2501static int
2502bnx2_enable_nvram_write(struct bnx2 *bp)
2503{
2504 u32 val;
2505
2506 val = REG_RD(bp, BNX2_MISC_CFG);
2507 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2508
2509 if (!bp->flash_info->buffered) {
2510 int j;
2511
2512 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2513 REG_WR(bp, BNX2_NVM_COMMAND,
2514 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2515
2516 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2517 udelay(5);
2518
2519 val = REG_RD(bp, BNX2_NVM_COMMAND);
2520 if (val & BNX2_NVM_COMMAND_DONE)
2521 break;
2522 }
2523
2524 if (j >= NVRAM_TIMEOUT_COUNT)
2525 return -EBUSY;
2526 }
2527 return 0;
2528}
2529
2530static void
2531bnx2_disable_nvram_write(struct bnx2 *bp)
2532{
2533 u32 val;
2534
2535 val = REG_RD(bp, BNX2_MISC_CFG);
2536 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2537}
2538
2539
2540static void
2541bnx2_enable_nvram_access(struct bnx2 *bp)
2542{
2543 u32 val;
2544
2545 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2546 /* Enable both bits, even on read. */
2547 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2548 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2549}
2550
2551static void
2552bnx2_disable_nvram_access(struct bnx2 *bp)
2553{
2554 u32 val;
2555
2556 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2557 /* Disable both bits, even after read. */
2558 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2559 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2560 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2561}
2562
2563static int
2564bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2565{
2566 u32 cmd;
2567 int j;
2568
2569 if (bp->flash_info->buffered)
2570 /* Buffered flash, no erase needed */
2571 return 0;
2572
2573 /* Build an erase command */
2574 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2575 BNX2_NVM_COMMAND_DOIT;
2576
2577 /* Need to clear DONE bit separately. */
2578 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2579
2580 /* Address of the NVRAM to read from. */
2581 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2582
2583 /* Issue an erase command. */
2584 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2585
2586 /* Wait for completion. */
2587 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2588 u32 val;
2589
2590 udelay(5);
2591
2592 val = REG_RD(bp, BNX2_NVM_COMMAND);
2593 if (val & BNX2_NVM_COMMAND_DONE)
2594 break;
2595 }
2596
2597 if (j >= NVRAM_TIMEOUT_COUNT)
2598 return -EBUSY;
2599
2600 return 0;
2601}
2602
/* Read one 32-bit word of NVRAM at byte 'offset' into ret_val[0..3].
 *
 * cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST framing bits the
 * caller uses to bracket a multi-dword transfer.  The caller must
 * already hold the NVRAM lock and have the access window enabled.
 * Returns 0 on success, -EBUSY if the command never completes.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	/* Buffered parts address by (page << page_bits) + byte-in-page
	 * rather than by linear byte offset. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Flash data is big-endian; convert before
			 * copying the bytes out. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2648
2649
/* Write one 32-bit word (val[0..3]) to NVRAM at byte 'offset'.
 *
 * cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST framing bits.  The
 * caller must hold the NVRAM lock, have the access window open and
 * write-enable lifted (bnx2_enable_nvram_write()).  Returns 0 on
 * success, -EBUSY if the command never completes.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	/* Buffered parts address by (page << page_bits) + byte-in-page. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Flash expects big-endian data. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2693
/* Identify the attached flash/EEPROM part from the NVM strapping pins,
 * point bp->flash_info at the matching flash_table entry, reconfigure
 * the flash interface if the bootcode has not already done so, and
 * record the device size in bp->flash_size.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* NOTE(review): bit 30 of NVM_CFG1 appears to mean "interface
	 * already reconfigured by the bootcode" — confirm against the
	 * register definition. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the config1 strap bits only. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap set. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the size reported in shared hardware config; fall back
	 * to the table's total_size when it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2771
2772static int
2773bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2774 int buf_size)
2775{
2776 int rc = 0;
2777 u32 cmd_flags, offset32, len32, extra;
2778
2779 if (buf_size == 0)
2780 return 0;
2781
2782 /* Request access to the flash interface. */
2783 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2784 return rc;
2785
2786 /* Enable access to flash interface */
2787 bnx2_enable_nvram_access(bp);
2788
2789 len32 = buf_size;
2790 offset32 = offset;
2791 extra = 0;
2792
2793 cmd_flags = 0;
2794
2795 if (offset32 & 3) {
2796 u8 buf[4];
2797 u32 pre_len;
2798
2799 offset32 &= ~3;
2800 pre_len = 4 - (offset & 3);
2801
2802 if (pre_len >= len32) {
2803 pre_len = len32;
2804 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2805 BNX2_NVM_COMMAND_LAST;
2806 }
2807 else {
2808 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2809 }
2810
2811 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2812
2813 if (rc)
2814 return rc;
2815
2816 memcpy(ret_buf, buf + (offset & 3), pre_len);
2817
2818 offset32 += 4;
2819 ret_buf += pre_len;
2820 len32 -= pre_len;
2821 }
2822 if (len32 & 3) {
2823 extra = 4 - (len32 & 3);
2824 len32 = (len32 + 4) & ~3;
2825 }
2826
2827 if (len32 == 4) {
2828 u8 buf[4];
2829
2830 if (cmd_flags)
2831 cmd_flags = BNX2_NVM_COMMAND_LAST;
2832 else
2833 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2834 BNX2_NVM_COMMAND_LAST;
2835
2836 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2837
2838 memcpy(ret_buf, buf, 4 - extra);
2839 }
2840 else if (len32 > 0) {
2841 u8 buf[4];
2842
2843 /* Read the first word. */
2844 if (cmd_flags)
2845 cmd_flags = 0;
2846 else
2847 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2848
2849 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2850
2851 /* Advance to the next dword. */
2852 offset32 += 4;
2853 ret_buf += 4;
2854 len32 -= 4;
2855
2856 while (len32 > 4 && rc == 0) {
2857 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2858
2859 /* Advance to the next dword. */
2860 offset32 += 4;
2861 ret_buf += 4;
2862 len32 -= 4;
2863 }
2864
2865 if (rc)
2866 return rc;
2867
2868 cmd_flags = BNX2_NVM_COMMAND_LAST;
2869 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2870
2871 memcpy(ret_buf, buf, 4 - extra);
2872 }
2873
2874 /* Disable access to flash interface */
2875 bnx2_disable_nvram_access(bp);
2876
2877 bnx2_release_nvram_lock(bp);
2878
2879 return rc;
2880}
2881
2882static int
2883bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2884 int buf_size)
2885{
2886 u32 written, offset32, len32;
2887 u8 *buf, start[4], end[4];
2888 int rc = 0;
2889 int align_start, align_end;
2890
2891 buf = data_buf;
2892 offset32 = offset;
2893 len32 = buf_size;
2894 align_start = align_end = 0;
2895
2896 if ((align_start = (offset32 & 3))) {
2897 offset32 &= ~3;
2898 len32 += align_start;
2899 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2900 return rc;
2901 }
2902
2903 if (len32 & 3) {
2904 if ((len32 > 4) || !align_start) {
2905 align_end = 4 - (len32 & 3);
2906 len32 += align_end;
2907 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2908 end, 4))) {
2909 return rc;
2910 }
2911 }
2912 }
2913
2914 if (align_start || align_end) {
2915 buf = kmalloc(len32, GFP_KERNEL);
2916 if (buf == 0)
2917 return -ENOMEM;
2918 if (align_start) {
2919 memcpy(buf, start, 4);
2920 }
2921 if (align_end) {
2922 memcpy(buf + len32 - 4, end, 4);
2923 }
2924 memcpy(buf + align_start, data_buf, buf_size);
2925 }
2926
2927 written = 0;
2928 while ((written < len32) && (rc == 0)) {
2929 u32 page_start, page_end, data_start, data_end;
2930 u32 addr, cmd_flags;
2931 int i;
2932 u8 flash_buffer[264];
2933
2934 /* Find the page_start addr */
2935 page_start = offset32 + written;
2936 page_start -= (page_start % bp->flash_info->page_size);
2937 /* Find the page_end addr */
2938 page_end = page_start + bp->flash_info->page_size;
2939 /* Find the data_start addr */
2940 data_start = (written == 0) ? offset32 : page_start;
2941 /* Find the data_end addr */
2942 data_end = (page_end > offset32 + len32) ?
2943 (offset32 + len32) : page_end;
2944
2945 /* Request access to the flash interface. */
2946 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2947 goto nvram_write_end;
2948
2949 /* Enable access to flash interface */
2950 bnx2_enable_nvram_access(bp);
2951
2952 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2953 if (bp->flash_info->buffered == 0) {
2954 int j;
2955
2956 /* Read the whole page into the buffer
2957 * (non-buffer flash only) */
2958 for (j = 0; j < bp->flash_info->page_size; j += 4) {
2959 if (j == (bp->flash_info->page_size - 4)) {
2960 cmd_flags |= BNX2_NVM_COMMAND_LAST;
2961 }
2962 rc = bnx2_nvram_read_dword(bp,
2963 page_start + j,
2964 &flash_buffer[j],
2965 cmd_flags);
2966
2967 if (rc)
2968 goto nvram_write_end;
2969
2970 cmd_flags = 0;
2971 }
2972 }
2973
2974 /* Enable writes to flash interface (unlock write-protect) */
2975 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
2976 goto nvram_write_end;
2977
2978 /* Erase the page */
2979 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
2980 goto nvram_write_end;
2981
2982 /* Re-enable the write again for the actual write */
2983 bnx2_enable_nvram_write(bp);
2984
2985 /* Loop to write back the buffer data from page_start to
2986 * data_start */
2987 i = 0;
2988 if (bp->flash_info->buffered == 0) {
2989 for (addr = page_start; addr < data_start;
2990 addr += 4, i += 4) {
2991
2992 rc = bnx2_nvram_write_dword(bp, addr,
2993 &flash_buffer[i], cmd_flags);
2994
2995 if (rc != 0)
2996 goto nvram_write_end;
2997
2998 cmd_flags = 0;
2999 }
3000 }
3001
3002 /* Loop to write the new data from data_start to data_end */
3003 for (addr = data_start; addr < data_end; addr += 4, i++) {
3004 if ((addr == page_end - 4) ||
3005 ((bp->flash_info->buffered) &&
3006 (addr == data_end - 4))) {
3007
3008 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3009 }
3010 rc = bnx2_nvram_write_dword(bp, addr, buf,
3011 cmd_flags);
3012
3013 if (rc != 0)
3014 goto nvram_write_end;
3015
3016 cmd_flags = 0;
3017 buf += 4;
3018 }
3019
3020 /* Loop to write back the buffer data from data_end
3021 * to page_end */
3022 if (bp->flash_info->buffered == 0) {
3023 for (addr = data_end; addr < page_end;
3024 addr += 4, i += 4) {
3025
3026 if (addr == page_end-4) {
3027 cmd_flags = BNX2_NVM_COMMAND_LAST;
3028 }
3029 rc = bnx2_nvram_write_dword(bp, addr,
3030 &flash_buffer[i], cmd_flags);
3031
3032 if (rc != 0)
3033 goto nvram_write_end;
3034
3035 cmd_flags = 0;
3036 }
3037 }
3038
3039 /* Disable writes to flash interface (lock write-protect) */
3040 bnx2_disable_nvram_write(bp);
3041
3042 /* Disable access to flash interface */
3043 bnx2_disable_nvram_access(bp);
3044 bnx2_release_nvram_lock(bp);
3045
3046 /* Increment written */
3047 written += data_end - data_start;
3048 }
3049
3050nvram_write_end:
3051 if (align_start || align_end)
3052 kfree(buf);
3053 return rc;
3054}
3055
/* Perform a full core reset of the chip.
 *
 * Sequence: quiesce DMA, handshake with the bootcode (WAIT0), deposit
 * the soft-reset signature, issue the reset through PCICFG_MISC_CONFIG,
 * poll for completion, verify endian configuration, then wait for the
 * bootcode to finish re-initializing (WAIT1).  'reset_code' is the
 * BNX2_DRV_MSG_CODE_* reason passed to the firmware.
 *
 * Returns 0 on success, -EBUSY/-ENODEV on hardware failure, or the
 * error from the firmware handshake.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* 5706 A0/A1 need extra settling time after reset. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3134
/* Program the chip after a reset: DMA byte ordering, context memory,
 * on-chip CPUs, NVRAM access, MAC address, MTU and the host coalescing
 * block, then report completion to the bootcode via bnx2_fw_sync().
 *
 * Returns 0 on success or the error from the firmware handshake.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA swap control plus the number of read/write DMA channels. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* NOTE(review): extra DMA config bit set only on 133 MHz PCI-X —
	 * purpose not visible in this file; presumably a bus workaround. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* Force single-DMA mode on the 5706 A0 stepping. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the Enable Relaxed Ordering bit in the PCI-X
	 * command register. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed by the initialization below. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	bnx2_init_cpus(bp);
	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Kernel bypass block size in the MQ block. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* RV2P page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff engine from the MAC address bytes. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the host status block and statistics block. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing parameters: low 16 bits are the normal
	 * value, high 16 bits the value used during an interrupt. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Record whether the bootcode has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Tell the bootcode that driver reset/init is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining chip blocks. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	return rc;
}
3292
3293
/* Reset the software TX ring state and program the TX L2 context so
 * the chip knows where the TX buffer descriptor chain lives. */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* The last BD in the ring chains back to the ring's own base
	 * address (single-page circular ring). */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	/* Reset producer/consumer indices and the byte sequence. */
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Context type/size for an L2 TX connection. */
	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* Physical address of the TX BD chain, high then low half. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3324
/* Reset the software RX ring state, program the RX L2 context, fill
 * the ring with receive buffers, and publish the initial producer
 * index to the chip's mailbox. */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Pre-fill length and START/END flags for every descriptor. */
	rxbd = &bp->rx_desc_ring[0];
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		rxbd->rx_bd_len = bp->rx_buf_use_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
	}

	/* rxbd now points at the last BD: chain it back to the ring base. */
	rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff;

	/* Context type and BD chain address for the L2 RX connection. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Populate the ring with skbs; stop early on allocation failure
	 * (the ring simply ends up shorter than requested). */
	for ( ;ring_prod < bp->rx_ring_size; ) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3376
3377static void
3378bnx2_free_tx_skbs(struct bnx2 *bp)
3379{
3380 int i;
3381
3382 if (bp->tx_buf_ring == NULL)
3383 return;
3384
3385 for (i = 0; i < TX_DESC_CNT; ) {
3386 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3387 struct sk_buff *skb = tx_buf->skb;
3388 int j, last;
3389
3390 if (skb == NULL) {
3391 i++;
3392 continue;
3393 }
3394
3395 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3396 skb_headlen(skb), PCI_DMA_TODEVICE);
3397
3398 tx_buf->skb = NULL;
3399
3400 last = skb_shinfo(skb)->nr_frags;
3401 for (j = 0; j < last; j++) {
3402 tx_buf = &bp->tx_buf_ring[i + j + 1];
3403 pci_unmap_page(bp->pdev,
3404 pci_unmap_addr(tx_buf, mapping),
3405 skb_shinfo(skb)->frags[j].size,
3406 PCI_DMA_TODEVICE);
3407 }
3408 dev_kfree_skb_any(skb);
3409 i += j + 1;
3410 }
3411
3412}
3413
3414static void
3415bnx2_free_rx_skbs(struct bnx2 *bp)
3416{
3417 int i;
3418
3419 if (bp->rx_buf_ring == NULL)
3420 return;
3421
3422 for (i = 0; i < RX_DESC_CNT; i++) {
3423 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3424 struct sk_buff *skb = rx_buf->skb;
3425
Michael Chan05d0f1c2005-11-04 08:53:48 -08003426 if (skb == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07003427 continue;
3428
3429 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3430 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3431
3432 rx_buf->skb = NULL;
3433
3434 dev_kfree_skb_any(skb);
3435 }
3436}
3437
/* Release all TX and RX buffers; used when tearing down the rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3444
3445static int
3446bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3447{
3448 int rc;
3449
3450 rc = bnx2_reset_chip(bp, reset_code);
3451 bnx2_free_skbs(bp);
3452 if (rc)
3453 return rc;
3454
3455 bnx2_init_chip(bp);
3456 bnx2_init_tx_ring(bp);
3457 bnx2_init_rx_ring(bp);
3458 return 0;
3459}
3460
3461static int
3462bnx2_init_nic(struct bnx2 *bp)
3463{
3464 int rc;
3465
3466 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3467 return rc;
3468
3469 bnx2_init_phy(bp);
3470 bnx2_set_link(bp);
3471 return 0;
3472}
3473
/* Register self-test (ethtool diagnostics).  For each table entry,
 * verify that the read-write bits (rw_mask) can be cleared and set,
 * and that the read-only bits (ro_mask) are unaffected by writes.
 * The original register value is restored in every case.
 * Returns 0 on success, -ENODEV on the first mismatch. */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16 offset;	/* register offset from BAR */
		u16 flags;
		u32 rw_mask;	/* bits that must be writable */
		u32 ro_mask;	/* bits that must not change on write */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write zeros: rw bits must read back 0, ro bits keep
		 * their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read back 1, ro bits keep
		 * their saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3636
3637static int
3638bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3639{
Arjan van de Venf71e1302006-03-03 21:33:57 -05003640 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
Michael Chanb6016b72005-05-26 13:03:09 -07003641 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3642 int i;
3643
3644 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3645 u32 offset;
3646
3647 for (offset = 0; offset < size; offset += 4) {
3648
3649 REG_WR_IND(bp, start + offset, test_pattern[i]);
3650
3651 if (REG_RD_IND(bp, start + offset) !=
3652 test_pattern[i]) {
3653 return -ENODEV;
3654 }
3655 }
3656 }
3657 return 0;
3658}
3659
3660static int
3661bnx2_test_memory(struct bnx2 *bp)
3662{
3663 int ret = 0;
3664 int i;
Arjan van de Venf71e1302006-03-03 21:33:57 -05003665 static const struct {
Michael Chanb6016b72005-05-26 13:03:09 -07003666 u32 offset;
3667 u32 len;
3668 } mem_tbl[] = {
3669 { 0x60000, 0x4000 },
Michael Chan5b0c76a2005-11-04 08:45:49 -08003670 { 0xa0000, 0x3000 },
Michael Chanb6016b72005-05-26 13:03:09 -07003671 { 0xe0000, 0x4000 },
3672 { 0x120000, 0x4000 },
3673 { 0x1a0000, 0x4000 },
3674 { 0x160000, 0x4000 },
3675 { 0xffffffff, 0 },
3676 };
3677
3678 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3679 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3680 mem_tbl[i].len)) != 0) {
3681 return ret;
3682 }
3683 }
3684
3685 return ret;
3686}
3687
#define BNX2_MAC_LOOPBACK 0
#define BNX2_PHY_LOOPBACK 1

/* Loopback self-test: transmit one test frame through the selected
 * (MAC or PHY) loopback path and verify it arrives intact on the RX
 * ring.  Must be called with the NIC quiesced.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, or -ENODEV if the frame is lost or
 * corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	u32 val;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = 0;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, zeroed
	 * source/type bytes, then a counting byte pattern that is
	 * checked on receive. */
	pkt_size = 1514;
	skb = dev_alloc_skb(pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalesce event so the status block indices are fresh
	 * before sampling the RX consumer index. */
	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Post a single TX descriptor covering the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	udelay(100);

	/* Force another coalesce event to pick up the TX and RX
	 * completions in the status block. */
	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);

	/* The chip must have consumed exactly our one TX descriptor. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly one packet must have arrived on the RX ring. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	/* NOTE(review): rx_start_idx is used directly as a ring index
	 * without an RX_RING_IDX() mask — assumes the index is within
	 * ring bounds in this quiesced test context; confirm. */
	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error bit fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match (the chip reports length including the
	 * 4-byte CRC)... */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* ...and the payload pattern must be intact. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
3808
Michael Chanbc5a0692006-01-23 16:13:22 -08003809#define BNX2_MAC_LOOPBACK_FAILED 1
3810#define BNX2_PHY_LOOPBACK_FAILED 2
3811#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
3812 BNX2_PHY_LOOPBACK_FAILED)
3813
3814static int
3815bnx2_test_loopback(struct bnx2 *bp)
3816{
3817 int rc = 0;
3818
3819 if (!netif_running(bp->dev))
3820 return BNX2_LOOPBACK_FAILED;
3821
3822 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3823 spin_lock_bh(&bp->phy_lock);
3824 bnx2_init_phy(bp);
3825 spin_unlock_bh(&bp->phy_lock);
3826 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3827 rc |= BNX2_MAC_LOOPBACK_FAILED;
3828 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3829 rc |= BNX2_PHY_LOOPBACK_FAILED;
3830 return rc;
3831}
3832
Michael Chanb6016b72005-05-26 13:03:09 -07003833#define NVRAM_SIZE 0x200
3834#define CRC32_RESIDUAL 0xdebb20e3
3835
3836static int
3837bnx2_test_nvram(struct bnx2 *bp)
3838{
3839 u32 buf[NVRAM_SIZE / 4];
3840 u8 *data = (u8 *) buf;
3841 int rc = 0;
3842 u32 magic, csum;
3843
3844 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3845 goto test_nvram_done;
3846
3847 magic = be32_to_cpu(buf[0]);
3848 if (magic != 0x669955aa) {
3849 rc = -ENODEV;
3850 goto test_nvram_done;
3851 }
3852
3853 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3854 goto test_nvram_done;
3855
3856 csum = ether_crc_le(0x100, data);
3857 if (csum != CRC32_RESIDUAL) {
3858 rc = -ENODEV;
3859 goto test_nvram_done;
3860 }
3861
3862 csum = ether_crc_le(0x100, data + 0x100);
3863 if (csum != CRC32_RESIDUAL) {
3864 rc = -ENODEV;
3865 }
3866
3867test_nvram_done:
3868 return rc;
3869}
3870
3871static int
3872bnx2_test_link(struct bnx2 *bp)
3873{
3874 u32 bmsr;
3875
Michael Chanc770a652005-08-25 15:38:39 -07003876 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003877 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3878 bnx2_read_phy(bp, MII_BMSR, &bmsr);
Michael Chanc770a652005-08-25 15:38:39 -07003879 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07003880
3881 if (bmsr & BMSR_LSTATUS) {
3882 return 0;
3883 }
3884 return -ENODEV;
3885}
3886
3887static int
3888bnx2_test_intr(struct bnx2 *bp)
3889{
3890 int i;
3891 u32 val;
3892 u16 status_idx;
3893
3894 if (!netif_running(bp->dev))
3895 return -ENODEV;
3896
3897 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3898
3899 /* This register is not touched during run-time. */
3900 val = REG_RD(bp, BNX2_HC_COMMAND);
3901 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
3902 REG_RD(bp, BNX2_HC_COMMAND);
3903
3904 for (i = 0; i < 10; i++) {
3905 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3906 status_idx) {
3907
3908 break;
3909 }
3910
3911 msleep_interruptible(10);
3912 }
3913 if (i < 10)
3914 return 0;
3915
3916 return -ENODEV;
3917}
3918
/* Periodic per-device timer.  Sends the driver heartbeat to the
 * bootcode and, on 5706 SerDes parts, runs the parallel-detect link
 * workaround for link partners that do not autonegotiate.  Always
 * re-arms itself while the interface is up. */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip this tick while interrupts are blocked (e.g. during a
	 * reset); just re-arm and try again. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Driver-alive pulse to the bootcode. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		spin_lock(&bp->phy_lock);
		if (bp->serdes_an_pending) {
			/* Autoneg recently restarted — give it time. */
			bp->serdes_an_pending--;
		}
		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bp->current_interval = bp->timer_interval;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);

			if (bmcr & BMCR_ANENABLE) {
				u32 phy1, phy2;

				/* Probe vendor-specific (non-standard MII)
				 * PHY registers via 0x1c/0x17/0x15. */
				bnx2_write_phy(bp, 0x1c, 0x7c00);
				bnx2_read_phy(bp, 0x1c, &phy1);

				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);
				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);

				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
					!(phy2 & 0x20)) {	/* no CONFIG */

					/* Signal present but the partner is
					 * not autonegotiating: force 1000
					 * full duplex (parallel detect). */
					bmcr &= ~BMCR_ANENABLE;
					bmcr |= BMCR_SPEED1000 |
						BMCR_FULLDPLX;
					bnx2_write_phy(bp, MII_BMCR, bmcr);
					bp->phy_flags |=
						PHY_PARALLEL_DETECT_FLAG;
				}
			}
		}
		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
			 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
			u32 phy2;

			/* Partner started sending CONFIG again: drop the
			 * forced mode and go back to autonegotiation. */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			if (phy2 & 0x20) {
				u32 bmcr;

				bnx2_read_phy(bp, MII_BMCR, &bmcr);
				bmcr |= BMCR_ANENABLE;
				bnx2_write_phy(bp, MII_BMCR, bmcr);

				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

			}
		}
		else
			bp->current_interval = bp->timer_interval;

		spin_unlock(&bp->phy_lock);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
3997
/* Called with rtnl_lock */
/* net_device open: power up the device, allocate rings and status
 * blocks, request the interrupt (MSI with INTx fallback), initialize
 * the NIC, and start the TX queue.  On 5706 A0/A1, or when disabled
 * by module parameter, MSI is not attempted.  If MSI is enabled but
 * the interrupt self-test fails, the driver falls back to INTx.
 * Returns 0 or a negative errno; all resources are released on
 * failure. */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Prefer MSI except on 5706 A0/A1 or when disabled; fall back
	 * to a shared INTx handler if MSI cannot be enabled. */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
	    !disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Undo everything acquired above. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-initialize and register the INTx handler. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4093
/* Work-queue handler (scheduled from bnx2_tx_timeout): quiesce the
 * interface, re-initialize the NIC, and restart it.  in_reset_task
 * marks that a reset is in flight for code that must not race it. */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is left at 1 before restarting —
	 * presumably balanced inside bnx2_netif_start(); confirm. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4111
/* net_device TX watchdog hook: defer the chip reset to process
 * context via the reset work item instead of resetting here. */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4120
4121#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * receive filter while the interface is quiesced. */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4135
/* Called with rtnl_lock */
/* VLAN acceleration hook: drop the given VLAN id from the group and
 * reprogram the receive filter while the interface is quiesced. */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4150#endif
4151
4152/* Called with dev->xmit_lock.
4153 * hard_start_xmit is pseudo-lockless - a lock is only required when
4154 * the tx queue is full. This way, we get the benefit of lockless
4155 * operations most of the time without the complexities to handle
4156 * netif_stop_queue/wake_queue race conditions.
4157 */
4158static int
4159bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4160{
Michael Chan972ec0d2006-01-23 16:12:43 -08004161 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004162 dma_addr_t mapping;
4163 struct tx_bd *txbd;
4164 struct sw_bd *tx_buf;
4165 u32 len, vlan_tag_flags, last_frag, mss;
4166 u16 prod, ring_prod;
4167 int i;
4168
Michael Chane89bbf12005-08-25 15:36:58 -07004169 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chanb6016b72005-05-26 13:03:09 -07004170 netif_stop_queue(dev);
4171 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4172 dev->name);
4173
4174 return NETDEV_TX_BUSY;
4175 }
4176 len = skb_headlen(skb);
4177 prod = bp->tx_prod;
4178 ring_prod = TX_RING_IDX(prod);
4179
4180 vlan_tag_flags = 0;
4181 if (skb->ip_summed == CHECKSUM_HW) {
4182 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4183 }
4184
4185 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4186 vlan_tag_flags |=
4187 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4188 }
4189#ifdef BCM_TSO
4190 if ((mss = skb_shinfo(skb)->tso_size) &&
4191 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4192 u32 tcp_opt_len, ip_tcp_len;
4193
4194 if (skb_header_cloned(skb) &&
4195 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4196 dev_kfree_skb(skb);
4197 return NETDEV_TX_OK;
4198 }
4199
4200 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4201 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4202
4203 tcp_opt_len = 0;
4204 if (skb->h.th->doff > 5) {
4205 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4206 }
4207 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4208
4209 skb->nh.iph->check = 0;
4210 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4211 skb->h.th->check =
4212 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4213 skb->nh.iph->daddr,
4214 0, IPPROTO_TCP, 0);
4215
4216 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4217 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4218 (tcp_opt_len >> 2)) << 8;
4219 }
4220 }
4221 else
4222#endif
4223 {
4224 mss = 0;
4225 }
4226
4227 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4228
4229 tx_buf = &bp->tx_buf_ring[ring_prod];
4230 tx_buf->skb = skb;
4231 pci_unmap_addr_set(tx_buf, mapping, mapping);
4232
4233 txbd = &bp->tx_desc_ring[ring_prod];
4234
4235 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4236 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4237 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4238 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4239
4240 last_frag = skb_shinfo(skb)->nr_frags;
4241
4242 for (i = 0; i < last_frag; i++) {
4243 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4244
4245 prod = NEXT_TX_BD(prod);
4246 ring_prod = TX_RING_IDX(prod);
4247 txbd = &bp->tx_desc_ring[ring_prod];
4248
4249 len = frag->size;
4250 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4251 len, PCI_DMA_TODEVICE);
4252 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4253 mapping, mapping);
4254
4255 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4256 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4257 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4258 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4259
4260 }
4261 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4262
4263 prod = NEXT_TX_BD(prod);
4264 bp->tx_prod_bseq += skb->len;
4265
Michael Chanb6016b72005-05-26 13:03:09 -07004266 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4267 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4268
4269 mmiowb();
4270
4271 bp->tx_prod = prod;
4272 dev->trans_start = jiffies;
4273
Michael Chane89bbf12005-08-25 15:36:58 -07004274 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
Michael Chanc770a652005-08-25 15:38:39 -07004275 spin_lock(&bp->tx_lock);
Michael Chane89bbf12005-08-25 15:36:58 -07004276 netif_stop_queue(dev);
4277
4278 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4279 netif_wake_queue(dev);
Michael Chanc770a652005-08-25 15:38:39 -07004280 spin_unlock(&bp->tx_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004281 }
4282
4283 return NETDEV_TX_OK;
4284}
4285
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	/* Quiesce the interface before touching the chip. */
	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the reset code according to the wake-on-LAN configuration. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	/* Release the interrupt (and MSI vector if one was enabled),
	 * then free all queued skbs and the ring memory.
	 */
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Leave the device in low power until the next open. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4321
/* Assemble a hardware counter, kept as separate _hi/_lo 32-bit words,
 * into an unsigned long.  The 64-bit form is only selected when longs
 * are 64 bits wide; 32-bit builds report the low word alone.  Both
 * expansions are fully parenthesized so the macros are safe inside
 * larger expressions.
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
4334
/* Fold the chip's statistics block into the standard net_device_stats
 * structure.  64-bit counters go through GET_NET_STATS and are
 * truncated to unsigned long on 32-bit hosts.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* No stats block yet:  return whatever was reported last. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	/* Length errors cover both runts and oversized frames. */
	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
    		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are deliberately reported as 0 on 5706
	 * and 5708 A0 parts (see the errata note by the stats tables).
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
    		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	return net_stats;
}
4406
4407/* All ethtool functions called with rtnl_lock */
4408
4409static int
4410bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4411{
Michael Chan972ec0d2006-01-23 16:12:43 -08004412 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004413
4414 cmd->supported = SUPPORTED_Autoneg;
4415 if (bp->phy_flags & PHY_SERDES_FLAG) {
4416 cmd->supported |= SUPPORTED_1000baseT_Full |
4417 SUPPORTED_FIBRE;
4418
4419 cmd->port = PORT_FIBRE;
4420 }
4421 else {
4422 cmd->supported |= SUPPORTED_10baseT_Half |
4423 SUPPORTED_10baseT_Full |
4424 SUPPORTED_100baseT_Half |
4425 SUPPORTED_100baseT_Full |
4426 SUPPORTED_1000baseT_Full |
4427 SUPPORTED_TP;
4428
4429 cmd->port = PORT_TP;
4430 }
4431
4432 cmd->advertising = bp->advertising;
4433
4434 if (bp->autoneg & AUTONEG_SPEED) {
4435 cmd->autoneg = AUTONEG_ENABLE;
4436 }
4437 else {
4438 cmd->autoneg = AUTONEG_DISABLE;
4439 }
4440
4441 if (netif_carrier_ok(dev)) {
4442 cmd->speed = bp->line_speed;
4443 cmd->duplex = bp->duplex;
4444 }
4445 else {
4446 cmd->speed = -1;
4447 cmd->duplex = -1;
4448 }
4449
4450 cmd->transceiver = XCVR_INTERNAL;
4451 cmd->phy_address = bp->phy_addr;
4452
4453 return 0;
4454}
4455
/* Apply link settings from ethtool.  All validation is done on local
 * copies, so nothing is committed to *bp when a combination is
 * rejected with -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Strip anything outside the copper speed/duplex bits. */
		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes do not exist on fiber. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is never valid. */
			return -EINVAL;
		}
		else {
			/* Anything else:  advertise everything the media
			 * supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed:  fiber only runs 1000/full, and copper
		 * cannot be forced to 1000.
		 */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000) ||
				(cmd->duplex != DUPLEX_FULL)) {
				return -EINVAL;
			}
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed:  commit and renegotiate. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4527
4528static void
4529bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4530{
Michael Chan972ec0d2006-01-23 16:12:43 -08004531 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004532
4533 strcpy(info->driver, DRV_MODULE_NAME);
4534 strcpy(info->version, DRV_MODULE_VERSION);
4535 strcpy(info->bus_info, pci_name(bp->pdev));
4536 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4537 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4538 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
Michael Chan206cc832006-01-23 16:14:05 -08004539 info->fw_version[1] = info->fw_version[3] = '.';
4540 info->fw_version[5] = 0;
Michael Chanb6016b72005-05-26 13:03:09 -07004541}
4542
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	/* The dump window is a fixed size regardless of device state. */
	int len = BNX2_REGDUMP_LEN;

	return len;
}
4550
4551static void
4552bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4553{
4554 u32 *p = _p, i, offset;
4555 u8 *orig_p = _p;
4556 struct bnx2 *bp = netdev_priv(dev);
4557 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4558 0x0800, 0x0880, 0x0c00, 0x0c10,
4559 0x0c30, 0x0d08, 0x1000, 0x101c,
4560 0x1040, 0x1048, 0x1080, 0x10a4,
4561 0x1400, 0x1490, 0x1498, 0x14f0,
4562 0x1500, 0x155c, 0x1580, 0x15dc,
4563 0x1600, 0x1658, 0x1680, 0x16d8,
4564 0x1800, 0x1820, 0x1840, 0x1854,
4565 0x1880, 0x1894, 0x1900, 0x1984,
4566 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4567 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4568 0x2000, 0x2030, 0x23c0, 0x2400,
4569 0x2800, 0x2820, 0x2830, 0x2850,
4570 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4571 0x3c00, 0x3c94, 0x4000, 0x4010,
4572 0x4080, 0x4090, 0x43c0, 0x4458,
4573 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4574 0x4fc0, 0x5010, 0x53c0, 0x5444,
4575 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4576 0x5fc0, 0x6000, 0x6400, 0x6428,
4577 0x6800, 0x6848, 0x684c, 0x6860,
4578 0x6888, 0x6910, 0x8000 };
4579
4580 regs->version = 0;
4581
4582 memset(p, 0, BNX2_REGDUMP_LEN);
4583
4584 if (!netif_running(bp->dev))
4585 return;
4586
4587 i = 0;
4588 offset = reg_boundaries[0];
4589 p += offset;
4590 while (offset < BNX2_REGDUMP_LEN) {
4591 *p++ = REG_RD(bp, offset);
4592 offset += 4;
4593 if (offset == reg_boundaries[i + 1]) {
4594 offset = reg_boundaries[i + 2];
4595 p = (u32 *) (orig_p + offset);
4596 i += 2;
4597 }
4598 }
4599}
4600
Michael Chanb6016b72005-05-26 13:03:09 -07004601static void
4602bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4603{
Michael Chan972ec0d2006-01-23 16:12:43 -08004604 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004605
4606 if (bp->flags & NO_WOL_FLAG) {
4607 wol->supported = 0;
4608 wol->wolopts = 0;
4609 }
4610 else {
4611 wol->supported = WAKE_MAGIC;
4612 if (bp->wol)
4613 wol->wolopts = WAKE_MAGIC;
4614 else
4615 wol->wolopts = 0;
4616 }
4617 memset(&wol->sopass, 0, sizeof(wol->sopass));
4618}
4619
4620static int
4621bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4622{
Michael Chan972ec0d2006-01-23 16:12:43 -08004623 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004624
4625 if (wol->wolopts & ~WAKE_MAGIC)
4626 return -EINVAL;
4627
4628 if (wol->wolopts & WAKE_MAGIC) {
4629 if (bp->flags & NO_WOL_FLAG)
4630 return -EINVAL;
4631
4632 bp->wol = 1;
4633 }
4634 else {
4635 bp->wol = 0;
4636 }
4637 return 0;
4638}
4639
/* Restart autonegotiation.  Fails with -EINVAL when autoneg is not
 * currently enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while sleeping with loopback asserted. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);
		/* On 5706 SerDes, arm the timer that polls for
		 * autonegotiation completion.
		 */
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	/* Clear loopback and kick off a fresh negotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4675
4676static int
4677bnx2_get_eeprom_len(struct net_device *dev)
4678{
Michael Chan972ec0d2006-01-23 16:12:43 -08004679 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004680
Michael Chan1122db72006-01-23 16:11:42 -08004681 if (bp->flash_info == NULL)
Michael Chanb6016b72005-05-26 13:03:09 -07004682 return 0;
4683
Michael Chan1122db72006-01-23 16:11:42 -08004684 return (int) bp->flash_size;
Michael Chanb6016b72005-05-26 13:03:09 -07004685}
4686
4687static int
4688bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4689 u8 *eebuf)
4690{
Michael Chan972ec0d2006-01-23 16:12:43 -08004691 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004692 int rc;
4693
John W. Linville1064e942005-11-10 12:58:24 -08004694 /* parameters already validated in ethtool_get_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004695
4696 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4697
4698 return rc;
4699}
4700
4701static int
4702bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4703 u8 *eebuf)
4704{
Michael Chan972ec0d2006-01-23 16:12:43 -08004705 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004706 int rc;
4707
John W. Linville1064e942005-11-10 12:58:24 -08004708 /* parameters already validated in ethtool_set_eeprom */
Michael Chanb6016b72005-05-26 13:03:09 -07004709
4710 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4711
4712 return rc;
4713}
4714
4715static int
4716bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4717{
Michael Chan972ec0d2006-01-23 16:12:43 -08004718 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004719
4720 memset(coal, 0, sizeof(struct ethtool_coalesce));
4721
4722 coal->rx_coalesce_usecs = bp->rx_ticks;
4723 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4724 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4725 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4726
4727 coal->tx_coalesce_usecs = bp->tx_ticks;
4728 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4729 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4730 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4731
4732 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4733
4734 return 0;
4735}
4736
4737static int
4738bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4739{
Michael Chan972ec0d2006-01-23 16:12:43 -08004740 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004741
4742 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4743 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4744
4745 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4746 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4747
4748 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4749 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4750
4751 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4752 if (bp->rx_quick_cons_trip_int > 0xff)
4753 bp->rx_quick_cons_trip_int = 0xff;
4754
4755 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4756 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4757
4758 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4759 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4760
4761 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4762 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4763
4764 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4765 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4766 0xff;
4767
4768 bp->stats_ticks = coal->stats_block_coalesce_usecs;
4769 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4770 bp->stats_ticks &= 0xffff00;
4771
4772 if (netif_running(bp->dev)) {
4773 bnx2_netif_stop(bp);
4774 bnx2_init_nic(bp);
4775 bnx2_netif_start(bp);
4776 }
4777
4778 return 0;
4779}
4780
4781static void
4782bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4783{
Michael Chan972ec0d2006-01-23 16:12:43 -08004784 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004785
4786 ering->rx_max_pending = MAX_RX_DESC_CNT;
4787 ering->rx_mini_max_pending = 0;
4788 ering->rx_jumbo_max_pending = 0;
4789
4790 ering->rx_pending = bp->rx_ring_size;
4791 ering->rx_mini_pending = 0;
4792 ering->rx_jumbo_pending = 0;
4793
4794 ering->tx_max_pending = MAX_TX_DESC_CNT;
4795 ering->tx_pending = bp->tx_ring_size;
4796}
4797
4798static int
4799bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4800{
Michael Chan972ec0d2006-01-23 16:12:43 -08004801 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004802
4803 if ((ering->rx_pending > MAX_RX_DESC_CNT) ||
4804 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4805 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4806
4807 return -EINVAL;
4808 }
4809 bp->rx_ring_size = ering->rx_pending;
4810 bp->tx_ring_size = ering->tx_pending;
4811
4812 if (netif_running(bp->dev)) {
4813 bnx2_netif_stop(bp);
4814 bnx2_init_nic(bp);
4815 bnx2_netif_start(bp);
4816 }
4817
4818 return 0;
4819}
4820
4821static void
4822bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4823{
Michael Chan972ec0d2006-01-23 16:12:43 -08004824 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004825
4826 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4827 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4828 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4829}
4830
4831static int
4832bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4833{
Michael Chan972ec0d2006-01-23 16:12:43 -08004834 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004835
4836 bp->req_flow_ctrl = 0;
4837 if (epause->rx_pause)
4838 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4839 if (epause->tx_pause)
4840 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4841
4842 if (epause->autoneg) {
4843 bp->autoneg |= AUTONEG_FLOW_CTRL;
4844 }
4845 else {
4846 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4847 }
4848
Michael Chanc770a652005-08-25 15:38:39 -07004849 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004850
4851 bnx2_setup_phy(bp);
4852
Michael Chanc770a652005-08-25 15:38:39 -07004853 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07004854
4855 return 0;
4856}
4857
4858static u32
4859bnx2_get_rx_csum(struct net_device *dev)
4860{
Michael Chan972ec0d2006-01-23 16:12:43 -08004861 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004862
4863 return bp->rx_csum;
4864}
4865
4866static int
4867bnx2_set_rx_csum(struct net_device *dev, u32 data)
4868{
Michael Chan972ec0d2006-01-23 16:12:43 -08004869 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07004870
4871 bp->rx_csum = data;
4872 return 0;
4873}
4874
#define BNX2_NUM_STATS 45

/* Counter names reported for ETH_SS_STATS.  The order of this table
 * must match bnx2_stats_offset_arr and the per-chip stats_len_arr
 * tables below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
};
4926
/* Convert a statistics_block field name to its u32 word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset into the hardware statistics block for each counter in
 * bnx2_stats_str_arr; 64-bit counters point at their _hi word.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
};
4976
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Byte width (4 or 8) of each counter on 5706-class chips; 0 marks a
 * counter that is not reported.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};

/* Same table for 5708:  carrier-sense errors (index 11) are reported
 * there.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};
4995
#define BNX2_NUM_TESTS 6

/* Test names reported for ETH_SS_TEST, in the order bnx2_self_test
 * fills in its result buffer.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5008
static int
bnx2_self_test_count(struct net_device *dev)
{
	/* Fixed-length test list; see bnx2_tests_str_arr. */
	return BNX2_NUM_TESTS;
}
5014
/* Run the ethtool self tests.  buf[i] is nonzero when test i (see
 * bnx2_tests_str_arr) failed.  Offline tests reset the chip, so the
 * NIC is reinitialized afterwards when the interface is up.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		/* Offline tests need exclusive access:  stop traffic and
		 * put the chip in diagnostic mode.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* The loopback result itself is stored in buf[2]. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		/* Copper links can take longer to renegotiate. */
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	/* Online tests run with the interface left as-is. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5066
5067static void
5068bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5069{
5070 switch (stringset) {
5071 case ETH_SS_STATS:
5072 memcpy(buf, bnx2_stats_str_arr,
5073 sizeof(bnx2_stats_str_arr));
5074 break;
5075 case ETH_SS_TEST:
5076 memcpy(buf, bnx2_tests_str_arr,
5077 sizeof(bnx2_tests_str_arr));
5078 break;
5079 }
5080}
5081
static int
bnx2_get_stats_count(struct net_device *dev)
{
	/* Fixed-length statistics list; see bnx2_stats_str_arr. */
	return BNX2_NUM_STATS;
}
5087
/* Fill buf[] with the current value of every counter named in
 * bnx2_stats_str_arr.  Counters a given chip revision does not report
 * (length 0 in its stats_len_arr) read as 0.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early 5706/5708 revisions skip some counters (errata). */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter:  _hi word first, _lo word follows it. */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5128
5129static int
5130bnx2_phys_id(struct net_device *dev, u32 data)
5131{
Michael Chan972ec0d2006-01-23 16:12:43 -08005132 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005133 int i;
5134 u32 save;
5135
5136 if (data == 0)
5137 data = 2;
5138
5139 save = REG_RD(bp, BNX2_MISC_CFG);
5140 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5141
5142 for (i = 0; i < (data * 2); i++) {
5143 if ((i % 2) == 0) {
5144 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5145 }
5146 else {
5147 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5148 BNX2_EMAC_LED_1000MB_OVERRIDE |
5149 BNX2_EMAC_LED_100MB_OVERRIDE |
5150 BNX2_EMAC_LED_10MB_OVERRIDE |
5151 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5152 BNX2_EMAC_LED_TRAFFIC);
5153 }
5154 msleep_interruptible(500);
5155 if (signal_pending(current))
5156 break;
5157 }
5158 REG_WR(bp, BNX2_EMAC_LED, 0);
5159 REG_WR(bp, BNX2_MISC_CFG, save);
5160 return 0;
5161}
5162
/* ethtool entry points; the ethtool core calls these under rtnl_lock. */
static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
#endif
	.self_test_count = bnx2_self_test_count,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_stats_count = bnx2_get_stats_count,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
5200
5201/* Called with rtnl_lock */
5202static int
5203bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5204{
Peter Hagervall14ab9b82005-08-10 14:18:16 -07005205 struct mii_ioctl_data *data = if_mii(ifr);
Michael Chan972ec0d2006-01-23 16:12:43 -08005206 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005207 int err;
5208
5209 switch(cmd) {
5210 case SIOCGMIIPHY:
5211 data->phy_id = bp->phy_addr;
5212
5213 /* fallthru */
5214 case SIOCGMIIREG: {
5215 u32 mii_regval;
5216
Michael Chanc770a652005-08-25 15:38:39 -07005217 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005218 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
Michael Chanc770a652005-08-25 15:38:39 -07005219 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005220
5221 data->val_out = mii_regval;
5222
5223 return err;
5224 }
5225
5226 case SIOCSMIIREG:
5227 if (!capable(CAP_NET_ADMIN))
5228 return -EPERM;
5229
Michael Chanc770a652005-08-25 15:38:39 -07005230 spin_lock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005231 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
Michael Chanc770a652005-08-25 15:38:39 -07005232 spin_unlock_bh(&bp->phy_lock);
Michael Chanb6016b72005-05-26 13:03:09 -07005233
5234 return err;
5235
5236 default:
5237 /* do nothing */
5238 break;
5239 }
5240 return -EOPNOTSUPP;
5241}
5242
5243/* Called with rtnl_lock */
5244static int
5245bnx2_change_mac_addr(struct net_device *dev, void *p)
5246{
5247 struct sockaddr *addr = p;
Michael Chan972ec0d2006-01-23 16:12:43 -08005248 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005249
Michael Chan73eef4c2005-08-25 15:39:15 -07005250 if (!is_valid_ether_addr(addr->sa_data))
5251 return -EINVAL;
5252
Michael Chanb6016b72005-05-26 13:03:09 -07005253 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5254 if (netif_running(dev))
5255 bnx2_set_mac_addr(bp);
5256
5257 return 0;
5258}
5259
5260/* Called with rtnl_lock */
5261static int
5262bnx2_change_mtu(struct net_device *dev, int new_mtu)
5263{
Michael Chan972ec0d2006-01-23 16:12:43 -08005264 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005265
5266 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5267 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5268 return -EINVAL;
5269
5270 dev->mtu = new_mtu;
5271 if (netif_running(dev)) {
5272 bnx2_netif_stop(bp);
5273
5274 bnx2_init_nic(bp);
5275
5276 bnx2_netif_start(bp);
5277 }
5278 return 0;
5279}
5280
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll/netconsole hook: invoke the interrupt handler by hand with the
 * device's IRQ line masked, so it is safe from contexts where normal
 * interrupt delivery cannot be relied on.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev, NULL);
	enable_irq(bp->pdev->irq);
}
#endif
5292
5293static int __devinit
5294bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5295{
5296 struct bnx2 *bp;
5297 unsigned long mem_len;
5298 int rc;
5299 u32 reg;
5300
5301 SET_MODULE_OWNER(dev);
5302 SET_NETDEV_DEV(dev, &pdev->dev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005303 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005304
5305 bp->flags = 0;
5306 bp->phy_flags = 0;
5307
5308 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5309 rc = pci_enable_device(pdev);
5310 if (rc) {
5311 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5312 goto err_out;
5313 }
5314
5315 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5316 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5317 "aborting.\n");
5318 rc = -ENODEV;
5319 goto err_out_disable;
5320 }
5321
5322 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5323 if (rc) {
5324 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5325 goto err_out_disable;
5326 }
5327
5328 pci_set_master(pdev);
5329
5330 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5331 if (bp->pm_cap == 0) {
5332 printk(KERN_ERR PFX "Cannot find power management capability, "
5333 "aborting.\n");
5334 rc = -EIO;
5335 goto err_out_release;
5336 }
5337
5338 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5339 if (bp->pcix_cap == 0) {
5340 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5341 rc = -EIO;
5342 goto err_out_release;
5343 }
5344
5345 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5346 bp->flags |= USING_DAC_FLAG;
5347 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5348 printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5349 "failed, aborting.\n");
5350 rc = -EIO;
5351 goto err_out_release;
5352 }
5353 }
5354 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5355 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5356 rc = -EIO;
5357 goto err_out_release;
5358 }
5359
5360 bp->dev = dev;
5361 bp->pdev = pdev;
5362
5363 spin_lock_init(&bp->phy_lock);
5364 spin_lock_init(&bp->tx_lock);
5365 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5366
5367 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5368 mem_len = MB_GET_CID_ADDR(17);
5369 dev->mem_end = dev->mem_start + mem_len;
5370 dev->irq = pdev->irq;
5371
5372 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5373
5374 if (!bp->regview) {
5375 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5376 rc = -ENOMEM;
5377 goto err_out_release;
5378 }
5379
5380 /* Configure byte swap and enable write to the reg_window registers.
5381 * Rely on CPU to do target byte swapping on big endian systems
5382 * The chip's target access swapping will not swap all accesses
5383 */
5384 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5385 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5386 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5387
Pavel Machek829ca9a2005-09-03 15:56:56 -07005388 bnx2_set_power_state(bp, PCI_D0);
Michael Chanb6016b72005-05-26 13:03:09 -07005389
5390 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5391
Michael Chanb6016b72005-05-26 13:03:09 -07005392 /* Get bus information. */
5393 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5394 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5395 u32 clkreg;
5396
5397 bp->flags |= PCIX_FLAG;
5398
5399 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5400
5401 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5402 switch (clkreg) {
5403 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5404 bp->bus_speed_mhz = 133;
5405 break;
5406
5407 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5408 bp->bus_speed_mhz = 100;
5409 break;
5410
5411 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5412 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5413 bp->bus_speed_mhz = 66;
5414 break;
5415
5416 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5417 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5418 bp->bus_speed_mhz = 50;
5419 break;
5420
5421 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5422 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5423 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5424 bp->bus_speed_mhz = 33;
5425 break;
5426 }
5427 }
5428 else {
5429 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5430 bp->bus_speed_mhz = 66;
5431 else
5432 bp->bus_speed_mhz = 33;
5433 }
5434
5435 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5436 bp->flags |= PCI_32BIT_FLAG;
5437
5438 /* 5706A0 may falsely detect SERR and PERR. */
5439 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5440 reg = REG_RD(bp, PCI_COMMAND);
5441 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5442 REG_WR(bp, PCI_COMMAND, reg);
5443 }
5444 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5445 !(bp->flags & PCIX_FLAG)) {
5446
5447 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5448 "aborting.\n");
5449 goto err_out_unmap;
5450 }
5451
5452 bnx2_init_nvram(bp);
5453
Michael Chane3648b32005-11-04 08:51:21 -08005454 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5455
5456 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5457 BNX2_SHM_HDR_SIGNATURE_SIG)
5458 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5459 else
5460 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5461
Michael Chanb6016b72005-05-26 13:03:09 -07005462 /* Get the permanent MAC address. First we need to make sure the
5463 * firmware is actually running.
5464 */
Michael Chane3648b32005-11-04 08:51:21 -08005465 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
Michael Chanb6016b72005-05-26 13:03:09 -07005466
5467 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5468 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5469 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5470 rc = -ENODEV;
5471 goto err_out_unmap;
5472 }
5473
Michael Chane3648b32005-11-04 08:51:21 -08005474 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
Michael Chanb6016b72005-05-26 13:03:09 -07005475
Michael Chane3648b32005-11-04 08:51:21 -08005476 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
Michael Chanb6016b72005-05-26 13:03:09 -07005477 bp->mac_addr[0] = (u8) (reg >> 8);
5478 bp->mac_addr[1] = (u8) reg;
5479
Michael Chane3648b32005-11-04 08:51:21 -08005480 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
Michael Chanb6016b72005-05-26 13:03:09 -07005481 bp->mac_addr[2] = (u8) (reg >> 24);
5482 bp->mac_addr[3] = (u8) (reg >> 16);
5483 bp->mac_addr[4] = (u8) (reg >> 8);
5484 bp->mac_addr[5] = (u8) reg;
5485
5486 bp->tx_ring_size = MAX_TX_DESC_CNT;
5487 bp->rx_ring_size = 100;
5488
5489 bp->rx_csum = 1;
5490
5491 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5492
5493 bp->tx_quick_cons_trip_int = 20;
5494 bp->tx_quick_cons_trip = 20;
5495 bp->tx_ticks_int = 80;
5496 bp->tx_ticks = 80;
5497
5498 bp->rx_quick_cons_trip_int = 6;
5499 bp->rx_quick_cons_trip = 6;
5500 bp->rx_ticks_int = 18;
5501 bp->rx_ticks = 18;
5502
5503 bp->stats_ticks = 1000000 & 0xffff00;
5504
5505 bp->timer_interval = HZ;
Michael Chancd339a02005-08-25 15:35:24 -07005506 bp->current_interval = HZ;
Michael Chanb6016b72005-05-26 13:03:09 -07005507
Michael Chan5b0c76a2005-11-04 08:45:49 -08005508 bp->phy_addr = 1;
5509
Michael Chanb6016b72005-05-26 13:03:09 -07005510 /* Disable WOL support if we are running on a SERDES chip. */
5511 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5512 bp->phy_flags |= PHY_SERDES_FLAG;
5513 bp->flags |= NO_WOL_FLAG;
Michael Chan5b0c76a2005-11-04 08:45:49 -08005514 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5515 bp->phy_addr = 2;
Michael Chane3648b32005-11-04 08:51:21 -08005516 reg = REG_RD_IND(bp, bp->shmem_base +
Michael Chan5b0c76a2005-11-04 08:45:49 -08005517 BNX2_SHARED_HW_CFG_CONFIG);
5518 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5519 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5520 }
Michael Chanb6016b72005-05-26 13:03:09 -07005521 }
5522
Michael Chandda1e392006-01-23 16:08:14 -08005523 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5524 bp->flags |= NO_WOL_FLAG;
5525
Michael Chanb6016b72005-05-26 13:03:09 -07005526 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5527 bp->tx_quick_cons_trip_int =
5528 bp->tx_quick_cons_trip;
5529 bp->tx_ticks_int = bp->tx_ticks;
5530 bp->rx_quick_cons_trip_int =
5531 bp->rx_quick_cons_trip;
5532 bp->rx_ticks_int = bp->rx_ticks;
5533 bp->comp_prod_trip_int = bp->comp_prod_trip;
5534 bp->com_ticks_int = bp->com_ticks;
5535 bp->cmd_ticks_int = bp->cmd_ticks;
5536 }
5537
5538 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5539 bp->req_line_speed = 0;
5540 if (bp->phy_flags & PHY_SERDES_FLAG) {
5541 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
Michael Chancd339a02005-08-25 15:35:24 -07005542
Michael Chane3648b32005-11-04 08:51:21 -08005543 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
Michael Chancd339a02005-08-25 15:35:24 -07005544 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5545 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5546 bp->autoneg = 0;
5547 bp->req_line_speed = bp->line_speed = SPEED_1000;
5548 bp->req_duplex = DUPLEX_FULL;
5549 }
Michael Chanb6016b72005-05-26 13:03:09 -07005550 }
5551 else {
5552 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5553 }
5554
5555 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5556
Michael Chancd339a02005-08-25 15:35:24 -07005557 init_timer(&bp->timer);
5558 bp->timer.expires = RUN_AT(bp->timer_interval);
5559 bp->timer.data = (unsigned long) bp;
5560 bp->timer.function = bnx2_timer;
5561
Michael Chanb6016b72005-05-26 13:03:09 -07005562 return 0;
5563
5564err_out_unmap:
5565 if (bp->regview) {
5566 iounmap(bp->regview);
Michael Chan73eef4c2005-08-25 15:39:15 -07005567 bp->regview = NULL;
Michael Chanb6016b72005-05-26 13:03:09 -07005568 }
5569
5570err_out_release:
5571 pci_release_regions(pdev);
5572
5573err_out_disable:
5574 pci_disable_device(pdev);
5575 pci_set_drvdata(pdev, NULL);
5576
5577err_out:
5578 return rc;
5579}
5580
5581static int __devinit
5582bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5583{
5584 static int version_printed = 0;
5585 struct net_device *dev = NULL;
5586 struct bnx2 *bp;
5587 int rc, i;
5588
5589 if (version_printed++ == 0)
5590 printk(KERN_INFO "%s", version);
5591
5592 /* dev zeroed in init_etherdev */
5593 dev = alloc_etherdev(sizeof(*bp));
5594
5595 if (!dev)
5596 return -ENOMEM;
5597
5598 rc = bnx2_init_board(pdev, dev);
5599 if (rc < 0) {
5600 free_netdev(dev);
5601 return rc;
5602 }
5603
5604 dev->open = bnx2_open;
5605 dev->hard_start_xmit = bnx2_start_xmit;
5606 dev->stop = bnx2_close;
5607 dev->get_stats = bnx2_get_stats;
5608 dev->set_multicast_list = bnx2_set_rx_mode;
5609 dev->do_ioctl = bnx2_ioctl;
5610 dev->set_mac_address = bnx2_change_mac_addr;
5611 dev->change_mtu = bnx2_change_mtu;
5612 dev->tx_timeout = bnx2_tx_timeout;
5613 dev->watchdog_timeo = TX_TIMEOUT;
5614#ifdef BCM_VLAN
5615 dev->vlan_rx_register = bnx2_vlan_rx_register;
5616 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5617#endif
5618 dev->poll = bnx2_poll;
5619 dev->ethtool_ops = &bnx2_ethtool_ops;
5620 dev->weight = 64;
5621
Michael Chan972ec0d2006-01-23 16:12:43 -08005622 bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005623
5624#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5625 dev->poll_controller = poll_bnx2;
5626#endif
5627
5628 if ((rc = register_netdev(dev))) {
5629 printk(KERN_ERR PFX "Cannot register net device\n");
5630 if (bp->regview)
5631 iounmap(bp->regview);
5632 pci_release_regions(pdev);
5633 pci_disable_device(pdev);
5634 pci_set_drvdata(pdev, NULL);
5635 free_netdev(dev);
5636 return rc;
5637 }
5638
5639 pci_set_drvdata(pdev, dev);
5640
5641 memcpy(dev->dev_addr, bp->mac_addr, 6);
John W. Linville24b8e052005-09-12 14:45:08 -07005642 memcpy(dev->perm_addr, bp->mac_addr, 6);
Michael Chanb6016b72005-05-26 13:03:09 -07005643 bp->name = board_info[ent->driver_data].name,
5644 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5645 "IRQ %d, ",
5646 dev->name,
5647 bp->name,
5648 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5649 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5650 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5651 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5652 bp->bus_speed_mhz,
5653 dev->base_addr,
5654 bp->pdev->irq);
5655
5656 printk("node addr ");
5657 for (i = 0; i < 6; i++)
5658 printk("%2.2x", dev->dev_addr[i]);
5659 printk("\n");
5660
5661 dev->features |= NETIF_F_SG;
5662 if (bp->flags & USING_DAC_FLAG)
5663 dev->features |= NETIF_F_HIGHDMA;
5664 dev->features |= NETIF_F_IP_CSUM;
5665#ifdef BCM_VLAN
5666 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5667#endif
5668#ifdef BCM_TSO
5669 dev->features |= NETIF_F_TSO;
5670#endif
5671
5672 netif_carrier_off(bp->dev);
5673
5674 return 0;
5675}
5676
5677static void __devexit
5678bnx2_remove_one(struct pci_dev *pdev)
5679{
5680 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005681 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005682
Michael Chanafdc08b2005-08-25 15:34:29 -07005683 flush_scheduled_work();
5684
Michael Chanb6016b72005-05-26 13:03:09 -07005685 unregister_netdev(dev);
5686
5687 if (bp->regview)
5688 iounmap(bp->regview);
5689
5690 free_netdev(dev);
5691 pci_release_regions(pdev);
5692 pci_disable_device(pdev);
5693 pci_set_drvdata(pdev, NULL);
5694}
5695
5696static int
Pavel Machek829ca9a2005-09-03 15:56:56 -07005697bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
Michael Chanb6016b72005-05-26 13:03:09 -07005698{
5699 struct net_device *dev = pci_get_drvdata(pdev);
Michael Chan972ec0d2006-01-23 16:12:43 -08005700 struct bnx2 *bp = netdev_priv(dev);
Michael Chanb6016b72005-05-26 13:03:09 -07005701 u32 reset_code;
5702
5703 if (!netif_running(dev))
5704 return 0;
5705
5706 bnx2_netif_stop(bp);
5707 netif_device_detach(dev);
5708 del_timer_sync(&bp->timer);
Michael Chandda1e392006-01-23 16:08:14 -08005709 if (bp->flags & NO_WOL_FLAG)
5710 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
5711 else if (bp->wol)
Michael Chanb6016b72005-05-26 13:03:09 -07005712 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5713 else
5714 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5715 bnx2_reset_chip(bp, reset_code);
5716 bnx2_free_skbs(bp);
Pavel Machek829ca9a2005-09-03 15:56:56 -07005717 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
Michael Chanb6016b72005-05-26 13:03:09 -07005718 return 0;
5719}
5720
/* Legacy PM resume hook: counterpart of bnx2_suspend(). */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Nothing to restore if the interface was down when we suspended. */
	if (!netif_running(dev))
		return 0;

	/* Bring the chip back to D0 before touching any registers, then
	 * rebuild the NIC state from scratch (suspend freed all skbs).
	 */
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
5736
/* PCI driver descriptor: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
5745
/* Module init: register the PCI driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}
5750
/* Module exit: unregister the PCI driver (triggers remove for all devices). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
5755
5756module_init(bnx2_init);
5757module_exit(bnx2_cleanup);
5758
5759
5760