blob: a67d537ccf49582f056106201295d92bbe681bda [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* b44.c: Broadcom 4400 device driver.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 *
6 * Distribute under GPL.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/types.h>
13#include <linux/netdevice.h>
14#include <linux/ethtool.h>
15#include <linux/mii.h>
16#include <linux/if_ether.h>
17#include <linux/etherdevice.h>
18#include <linux/pci.h>
19#include <linux/delay.h>
20#include <linux/init.h>
Andrew Morton89358f92005-10-28 16:38:02 -040021#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022
23#include <asm/uaccess.h>
24#include <asm/io.h>
25#include <asm/irq.h>
26
27#include "b44.h"
28
29#define DRV_MODULE_NAME "b44"
30#define PFX DRV_MODULE_NAME ": "
Francois Romieu981d9c12005-11-30 22:35:39 +010031#define DRV_MODULE_VERSION "0.97"
32#define DRV_MODULE_RELDATE "Nov 30, 2005"
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
34#define B44_DEF_MSG_ENABLE \
35 (NETIF_MSG_DRV | \
36 NETIF_MSG_PROBE | \
37 NETIF_MSG_LINK | \
38 NETIF_MSG_TIMER | \
39 NETIF_MSG_IFDOWN | \
40 NETIF_MSG_IFUP | \
41 NETIF_MSG_RX_ERR | \
42 NETIF_MSG_TX_ERR)
43
44/* length of time before we decide the hardware is borked,
45 * and dev->tx_timeout() should be called to fix the problem
46 */
47#define B44_TX_TIMEOUT (5 * HZ)
48
49/* hardware minimum and maximum for a single frame's data payload */
50#define B44_MIN_MTU 60
51#define B44_MAX_MTU 1500
52
53#define B44_RX_RING_SIZE 512
54#define B44_DEF_RX_RING_PENDING 200
55#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
56 B44_RX_RING_SIZE)
57#define B44_TX_RING_SIZE 512
58#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
59#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
60 B44_TX_RING_SIZE)
61#define B44_DMA_MASK 0x3fffffff
62
63#define TX_RING_GAP(BP) \
64 (B44_TX_RING_SIZE - (BP)->tx_pending)
65#define TX_BUFFS_AVAIL(BP) \
66 (((BP)->tx_cons <= (BP)->tx_prod) ? \
67 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
68 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
69#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
70
71#define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
72#define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
73
74/* minimum number of free TX descriptors required to wake up TX process */
75#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
76
77static char version[] __devinitdata =
78 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
79
80MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
81MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
82MODULE_LICENSE("GPL");
83MODULE_VERSION(DRV_MODULE_VERSION);
84
85static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
86module_param(b44_debug, int, 0);
87MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
88
89static struct pci_device_id b44_pci_tbl[] = {
90 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
91 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
92 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
93 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
94 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
95 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
96 { } /* terminate list with empty entry */
97};
98
99MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
100
101static void b44_halt(struct b44 *);
102static void b44_init_rings(struct b44 *);
103static void b44_init_hw(struct b44 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104
John W. Linville9f38c632005-10-18 21:30:59 -0400105static int dma_desc_align_mask;
106static int dma_desc_sync_size;
107
Francois Romieu33539302005-11-07 01:51:34 +0100108static const char b44_gstrings[][ETH_GSTRING_LEN] = {
109#define _B44(x...) # x,
110B44_STAT_REG_DECLARE
111#undef _B44
112};
113
/* Flush one descriptor-ring entry from CPU caches so the device sees the
 * latest CPU write.  Only used when the ring was kmalloc'ed and streaming-
 * mapped (the *_RING_HACK flags); @offset is the byte offset of the entry
 * within the ring, masked down to the sync alignment granule.
 */
static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(&pdev->dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
}
123
/* Counterpart of b44_sync_dma_desc_for_device(): make a device-written
 * descriptor-ring entry visible to the CPU before reading it.
 */
static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
}
133
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
135{
136 return readl(bp->regs + reg);
137}
138
/* Write @val to the 32-bit chip register at byte offset @reg. */
static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        writel(val, bp->regs + reg);
}
144
/* Poll register @reg until @bit is cleared (@clear non-zero) or set
 * (@clear zero), sampling every 10us for up to @timeout iterations.
 *
 * Returns 0 on success, -ENODEV (with a log message) if the bit never
 * reached the requested state.  Busy-waits; callers hold bp->lock.
 */
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}
169
/* Sonics SiliconBackplane (SSB) support routines.
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */
176
177#define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
178#define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
179
180static u32 ssb_get_core_rev(struct b44 *bp)
181{
182 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
183}
184
/* Configure the PCI core on the backplane: route the interrupt lines of
 * @cores to PCI, and enable prefetch/burst on PCI transactions.
 *
 * Temporarily retargets BAR0 at the PCI core's register space (via the
 * SSB_BAR0_WIN config window) and restores it before returning.
 * Returns the PCI core's revision.  Caller holds bp->lock.
 */
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        /* Save the current BAR0 window, then point it at the PCI core. */
        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        /* Enable interrupt forwarding for the requested cores. */
        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        /* Turn on prefetch and burst for PCI transactions. */
        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        /* Restore the original BAR0 window. */
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}
205
/* Put the backplane core into reset, following the documented SSB
 * disable sequence: reject new transactions, wait for the reject to take
 * and for in-flight transactions to drain, then assert reset.
 *
 * No-op if the core is already held in reset.  Caller holds bp->lock.
 */
static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
        /* Read back to post the write, then give the core time to settle. */
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}
222
/* Fully reset the core: disable it, bring it out of reset with the clock
 * forced on, clear any latched error state, then release the force-gated
 * clock so the core runs normally.  Caller holds bp->lock.
 */
static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);         /* post the write */
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround. */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        /* Clear latched in-band error / timeout indications. */
        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        /* De-assert reset, clock still force-gated on. */
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Release the force-gate: normal clocked operation. */
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}
248
/* Return the backplane unit number of the ethernet core.
 *
 * This driver only ever operates on unit 0, so the answer is constant.
 * (A never-enabled "#if 0" sketch that derived the unit from the
 * SBADMATCH0 base-address register — and referenced an undeclared
 * variable — has been removed as dead code.)
 */
static int ssb_core_unit(struct b44 *bp)
{
        return 0;
}
273
274static int ssb_is_core_up(struct b44 *bp)
275{
276 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
277 == SBTMSLOW_CLOCK);
278}
279
/* Program one 6-byte MAC address (@data) into CAM slot @index and wait
 * for the write to complete.  The hardware takes the address split as
 * bytes 2-5 in the low data register and bytes 0-1 (plus a valid bit)
 * in the high data register.  Caller holds bp->lock.
 */
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) << 8;
        val |= ((u32) data[5]) << 0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
297
/* Mask all chip interrupt sources.  Write is not flushed here; see
 * b44_disable_ints() for the flushing variant.
 */
static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}
302
/* Mask all interrupts and flush the posted write so the chip is
 * guaranteed quiet when this returns.
 */
static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}
310
/* Re-enable the interrupt sources saved in bp->imask. */
static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}
315
/* Read PHY register @reg over MDIO into *@val.
 *
 * Acks any pending MII-done interrupt, issues the read command, then
 * waits for completion.  Returns 0 on success or the b44_wait_bit()
 * error; note *@val is read back unconditionally, so on timeout its
 * contents are not meaningful.  Always uses bp->phy_addr.
 */
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}
331
/* Write @val to PHY register @reg over MDIO.
 *
 * Acks any pending MII-done interrupt, issues the write command, and
 * waits for completion.  Returns 0 or the b44_wait_bit() error.
 * Always uses bp->phy_addr.
 */
static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
343
344/* miilib interface */
345/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
346 * due to code existing before miilib use was added to this driver.
347 * Someone should remove this artificial driver limitation in
348 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
349 */
350static int b44_mii_read(struct net_device *dev, int phy_id, int location)
351{
352 u32 val;
353 struct b44 *bp = netdev_priv(dev);
354 int rc = b44_readphy(bp, location, &val);
355 if (rc)
356 return 0xffffffff;
357 return val;
358}
359
/* miilib write hook; phy_id is ignored (b44_writephy() uses bp->phy_addr)
 * and write failures are silently dropped, matching the miilib contract.
 */
static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                          int val)
{
        b44_writephy(netdev_priv(dev), location, val);
}
366
367static int b44_phy_reset(struct b44 *bp)
368{
369 u32 val;
370 int err;
371
372 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
373 if (err)
374 return err;
375 udelay(100);
376 err = b44_readphy(bp, MII_BMCR, &val);
377 if (!err) {
378 if (val & BMCR_RESET) {
379 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
380 bp->dev->name);
381 err = -ENODEV;
382 }
383 }
384
385 return 0;
386}
387
/* Apply the resolved pause configuration: record @pause_flags in
 * bp->flags and program the RX flow-control enable and the TX pause
 * enable (with the RX-FIFO high-water mark) accordingly.
 * Caller holds bp->lock.
 */
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}
410
/* Resolve pause settings from our advertisement (@local) and the link
 * partner's ability (@remote) per the 802.3 pause-resolution rules,
 * then program the MAC via __b44_set_flow_ctrl().
 */
static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)){
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}
429
/* Configure the PHY: set up activity/traffic LEDs, then either start
 * autonegotiation with the advertisement built from bp->flags, or — if
 * the user forced the link — program speed/duplex directly into BMCR
 * and disable flow control (no negotiation means the partner's pause
 * ability is unknown).
 *
 * Returns 0 on success or the first PHY read/write error.
 * Caller holds bp->lock.
 */
static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        /* LED setup: mask activity-LED bits, enable the traffic LED. */
        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                /* Autoneg: advertise whatever modes bp->flags allow. */
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                /* Forced link: program speed and duplex directly. */
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not. So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}
489
/* Accumulate the chip's clear-on-read hardware counters into
 * bp->hw_stats, walking the TX counter registers and then the RX
 * counter registers in address order.
 *
 * NOTE(review): the *val++ walk assumes the u32 fields of bp->hw_stats
 * are laid out in exactly the same order as the counter registers,
 * TX block first then RX block — confirm against b44.h if touching
 * either.  The "reg += 8*4UL" pad below is dead code: reg is
 * immediately reassigned by the second for-loop initializer.
 * Caller holds bp->lock.
 */
static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}
507
/* Log the current link state: down, or up with the speed/duplex and
 * pause configuration recorded in bp->flags.
 */
static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}
525
/* Poll the PHY for link changes (called from the periodic timer).
 *
 * Reads BMSR and the Broadcom aux-control register; if both reads
 * succeed and the PHY responds (bmsr != 0xffff), updates the cached
 * speed/duplex flags, and on a carrier transition programs MAC duplex,
 * re-resolves flow control (autoneg only), toggles the carrier state
 * and logs the change.  Also warns about remote-fault and jabber bits.
 * Caller holds bp->lock.
 */
static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        /* Mirror the negotiated duplex into the MAC. */
                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}
575
/* Periodic (1 Hz) housekeeping timer: check the PHY link state and
 * harvest the hardware statistics counters, then re-arm itself.
 */
static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);
}
591
/* Reclaim completed TX descriptors.
 *
 * Reads the hardware's current descriptor pointer, unmaps and frees
 * every skb the chip has finished with, advances tx_cons, and wakes the
 * queue if enough room opened up.  The final GPTIMER write clears the
 * watchdog-style timeout interrupt.  Called from b44_poll() under
 * bp->lock.
 */
static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        /* Hardware reports its position as a byte offset into the ring. */
        cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}
621
622/* Works like this. This chip writes a 'struct rx_header" 30 bytes
623 * before the DMA address you give it. So we allocate 30 more bytes
624 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
625 * point the chip at 30 bytes past where the rx_header will go.
626 */
627static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
628{
629 struct dma_desc *dp;
630 struct ring_info *src_map, *map;
631 struct rx_header *rh;
632 struct sk_buff *skb;
633 dma_addr_t mapping;
634 int dest_idx;
635 u32 ctrl;
636
637 src_map = NULL;
638 if (src_idx >= 0)
639 src_map = &bp->rx_buffers[src_idx];
640 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
641 map = &bp->rx_buffers[dest_idx];
642 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
643 if (skb == NULL)
644 return -ENOMEM;
645
646 mapping = pci_map_single(bp->pdev, skb->data,
647 RX_PKT_BUF_SZ,
648 PCI_DMA_FROMDEVICE);
649
650 /* Hardware bug work-around, the chip is unable to do PCI DMA
651 to/from anything above 1GB :-( */
Francois Romieu874a6212005-11-07 01:50:46 +0100652 if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700653 /* Sigh... */
654 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
655 dev_kfree_skb_any(skb);
656 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
657 if (skb == NULL)
658 return -ENOMEM;
659 mapping = pci_map_single(bp->pdev, skb->data,
660 RX_PKT_BUF_SZ,
661 PCI_DMA_FROMDEVICE);
Francois Romieu874a6212005-11-07 01:50:46 +0100662 if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
664 dev_kfree_skb_any(skb);
665 return -ENOMEM;
666 }
667 }
668
669 skb->dev = bp->dev;
670 skb_reserve(skb, bp->rx_offset);
671
672 rh = (struct rx_header *)
673 (skb->data - bp->rx_offset);
674 rh->len = 0;
675 rh->flags = 0;
676
677 map->skb = skb;
678 pci_unmap_addr_set(map, mapping, mapping);
679
680 if (src_map != NULL)
681 src_map->skb = NULL;
682
683 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
684 if (dest_idx == (B44_RX_RING_SIZE - 1))
685 ctrl |= DESC_CTRL_EOT;
686
687 dp = &bp->rx_ring[dest_idx];
688 dp->ctrl = cpu_to_le32(ctrl);
689 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
690
John W. Linville9f38c632005-10-18 21:30:59 -0400691 if (bp->flags & B44_FLAG_RX_RING_HACK)
692 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
693 dest_idx * sizeof(dp),
694 DMA_BIDIRECTIONAL);
695
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696 return RX_PKT_BUF_SZ;
697}
698
699static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
700{
701 struct dma_desc *src_desc, *dest_desc;
702 struct ring_info *src_map, *dest_map;
703 struct rx_header *rh;
704 int dest_idx;
705 u32 ctrl;
706
707 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
708 dest_desc = &bp->rx_ring[dest_idx];
709 dest_map = &bp->rx_buffers[dest_idx];
710 src_desc = &bp->rx_ring[src_idx];
711 src_map = &bp->rx_buffers[src_idx];
712
713 dest_map->skb = src_map->skb;
714 rh = (struct rx_header *) src_map->skb->data;
715 rh->len = 0;
716 rh->flags = 0;
717 pci_unmap_addr_set(dest_map, mapping,
718 pci_unmap_addr(src_map, mapping));
719
John W. Linville9f38c632005-10-18 21:30:59 -0400720 if (bp->flags & B44_FLAG_RX_RING_HACK)
721 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
722 src_idx * sizeof(src_desc),
723 DMA_BIDIRECTIONAL);
724
Linus Torvalds1da177e2005-04-16 15:20:36 -0700725 ctrl = src_desc->ctrl;
726 if (dest_idx == (B44_RX_RING_SIZE - 1))
727 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
728 else
729 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
730
731 dest_desc->ctrl = ctrl;
732 dest_desc->addr = src_desc->addr;
John W. Linville9f38c632005-10-18 21:30:59 -0400733
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734 src_map->skb = NULL;
735
John W. Linville9f38c632005-10-18 21:30:59 -0400736 if (bp->flags & B44_FLAG_RX_RING_HACK)
737 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
738 dest_idx * sizeof(dest_desc),
739 DMA_BIDIRECTIONAL);
740
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
742 RX_PKT_BUF_SZ,
743 PCI_DMA_FROMDEVICE);
744}
745
/* Receive up to @budget packets from the RX ring; returns the number
 * actually delivered to the stack.
 *
 * Large packets are handed up in-place and a fresh buffer is posted;
 * small packets (<= RX_COPY_THRESHOLD) are copied into a new skb and
 * the original buffer is recycled.  Called from b44_poll().
 *
 * NOTE(review): rh->len is read with cpu_to_le16() where le16_to_cpu()
 * is the semantically correct direction; both are the identical byte
 * swap (or identity) on every architecture, so behavior is unaffected.
 */
static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        /* Hardware reports its position as a byte offset into the ring. */
        prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = pci_unmap_addr(rp, mapping);
                struct rx_header *rh;
                u16 len;

                pci_dma_sync_single_for_cpu(bp->pdev, map,
                                            RX_PKT_BUF_SZ,
                                            PCI_DMA_FROMDEVICE);
                rh = (struct rx_header *) skb->data;
                len = cpu_to_le16(rh->len);
                /* Oversized or error-flagged frames are dropped. */
                if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        /* The chip may signal completion before the length
                         * lands in memory; re-read it a few times.
                         */
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = cpu_to_le16(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        /* Hand the buffer up and post a replacement. */
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        pci_unmap_single(bp->pdev, map,
                                         skb_size, PCI_DMA_FROMDEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len+bp->rx_offset);
                        skb_pull(skb,bp->rx_offset);
                } else {
                        /* Small frame: copy out and recycle the buffer. */
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        copy_skb->dev = bp->dev;
                        skb_reserve(copy_skb, 2);       /* align IP header */
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        memcpy(copy_skb->data, skb->data+bp->rx_offset, len);

                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        /* Tell the chip how far we have consumed. */
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}
835
/* NAPI poll handler.
 *
 * Services the interrupt causes latched in bp->istat by b44_interrupt():
 * reclaims TX completions, receives up to the NAPI budget of packets,
 * and on any error condition resets and reinitializes the chip.
 * Returns 0 (and re-enables interrupts) when done, 1 to be polled again.
 */
static int b44_poll(struct net_device *netdev, int *budget)
{
        struct b44 *bp = netdev_priv(netdev);
        int done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        done = 1;
        if (bp->istat & ISTAT_RX) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = b44_rx(bp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;

                /* Budget exhausted: ask to be polled again. */
                if (work_done >= orig_budget)
                        done = 0;
        }

        if (bp->istat & ISTAT_ERRORS) {
                /* Error condition: full chip reset and reinit. */
                spin_lock_irq(&bp->lock);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp);
                netif_wake_queue(bp->dev);
                spin_unlock_irq(&bp->lock);
                done = 1;
        }

        if (done) {
                netif_rx_complete(netdev);
                b44_enable_ints(bp);
        }

        return (done ? 0 : 1);
}
884
/* Interrupt handler.
 *
 * Latches the masked interrupt status into bp->istat, disables chip
 * interrupts, and schedules the NAPI poll which does the real work.
 * The status is acked even for late interrupts (device not running).
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* ISTAT reports events regardless of IMASK, so apply the mask
         * by hand before acting on the status.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         * the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}
928
/* Watchdog callback invoked by the network core when transmission
 * stalls for B44_TX_TIMEOUT: log, fully reset/reinit the chip, then
 * restart the queue.
 */
static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}
948
/* hard_start_xmit: queue one skb on the TX ring.
 *
 * Maps the packet for DMA, bouncing it into a GFP_DMA copy if the
 * mapping lands above the chip's 1GB DMA limit, fills in the next TX
 * descriptor, and kicks the chip's TX pointer (with extra writes/reads
 * for the BUGGY_TXPTR / REORDER_BUG hardware quirks).
 *
 * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY when the ring is
 * full or a bounce buffer cannot be obtained.
 */
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct sk_buff *bounce_skb;
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (mapping + len > B44_DMA_MASK) {
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

                bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
                                             GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
                if (mapping + len > B44_DMA_MASK) {
                        pci_unmap_single(bp->pdev, mapping,
                                         len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        /* Descriptor must be visible before the doorbell write below. */
        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}
1035
/* ndo change_mtu: validate the new MTU against the hardware limits.
 * If the interface is down, just record it; otherwise reset and
 * reinitialize the chip with the new size.  Returns 0 or -EINVAL.
 */
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}
1062
1063/* Free up pending packets in all rx/tx rings.
1064 *
1065 * The chip has been shut down and the driver detached from
1066 * the networking, so no interrupts or new tx packets will
1067 * end up in the driver. bp->lock is not held and we are not
1068 * in an interrupt context and thus may sleep.
1069 */
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        /* RX buffers are all mapped at the fixed RX_PKT_BUF_SZ size. */
        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 rp->skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}
1102
1103/* Initialize tx/rx rings for packet processing.
1104 *
1105 * The chip has been shut down and the driver detached from
1106 * the networking, so no interrupts or new tx packets will
Francois Romieu874a6212005-11-07 01:50:46 +01001107 * end up in the driver.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108 */
1109static void b44_init_rings(struct b44 *bp)
1110{
1111 int i;
1112
1113 b44_free_rings(bp);
1114
1115 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1116 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1117
John W. Linville9f38c632005-10-18 21:30:59 -04001118 if (bp->flags & B44_FLAG_RX_RING_HACK)
1119 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1120 DMA_TABLE_BYTES,
1121 PCI_DMA_BIDIRECTIONAL);
1122
1123 if (bp->flags & B44_FLAG_TX_RING_HACK)
1124 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1125 DMA_TABLE_BYTES,
1126 PCI_DMA_TODEVICE);
1127
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 for (i = 0; i < bp->rx_pending; i++) {
1129 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1130 break;
1131 }
1132}
1133
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	/* kfree(NULL) is a no-op, so no guards are needed here. */
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	/* The descriptor tables have two possible origins (see
	 * b44_alloc_consistent): a coherent pci_alloc_consistent()
	 * buffer, or a kmalloc'd buffer that was streaming-mapped when
	 * the coherent allocation landed above the 30-bit DMA mask.
	 * The *_RING_HACK flags record which path was taken and select
	 * the matching release sequence.
	 */
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}
1169
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down. Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
	int size;

	/* Software-side shadow arrays tracking the skb/mapping for each
	 * descriptor slot.
	 */
	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary... */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		/* Fallback: plain kmalloc plus a streaming mapping.  The
		 * result must still fit under the chip's 30-bit DMA mask;
		 * if not, give up.
		 */
		rx_ring = kzalloc(size, GFP_KERNEL);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (rx_ring_dma + size > B44_DMA_MASK) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		/* Remember the fallback so teardown/sync use the
		 * streaming-DMA API (see b44_free_consistent).
		 */
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary... */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, GFP_KERNEL);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (tx_ring_dma + size > B44_DMA_MASK) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	/* Safe on partial allocation: b44_free_consistent handles NULLs. */
	b44_free_consistent(bp);
	return -ENOMEM;
}
1247
1248/* bp->lock is held. */
1249static void b44_clear_stats(struct b44 *bp)
1250{
1251 unsigned long reg;
1252
1253 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1254 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1255 br32(bp, reg);
1256 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1257 br32(bp, reg);
1258}
1259
/* bp->lock is held. */
/* Reset the MAC core to a known state and make the PHY reachable.
 * If the core is already up, the DMA engines are quiesced first so
 * the reset does not interrupt in-flight transfers.
 */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		/* Wait for the disable to take effect before touching DMA. */
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		/* Core is down: route the right interrupt vector for this
		 * ethernet unit before bringing it up.
		 */
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(bp, B44_MDIO_CTRL);

	/* Internal vs. external PHY selection, per DEVCTRL_IPP. */
	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			/* Take the internal PHY out of reset and give it
			 * time to come up.
			 */
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
1305
/* bp->lock is held. */
/* Stop the device: mask all interrupt sources first, then reset the
 * chip so no DMA or MAC activity survives.  The order matters.
 */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}
1312
/* bp->lock is held. */
/* Program the station MAC address into CAM entry 0 and enable the CAM,
 * unless the interface is in promiscuous mode (in which case the CAM
 * filter is left disabled and all frames are accepted).
 */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
1325
1326static int b44_set_mac_addr(struct net_device *dev, void *p)
1327{
1328 struct b44 *bp = netdev_priv(dev);
1329 struct sockaddr *addr = p;
1330
1331 if (netif_running(dev))
1332 return -EBUSY;
1333
Gary Zambrano391fc092006-03-28 14:57:38 -08001334 if (!is_valid_ether_addr(addr->sa_data))
1335 return -EINVAL;
1336
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1338
1339 spin_lock_irq(&bp->lock);
1340 __b44_set_mac_addr(bp);
1341 spin_unlock_irq(&bp->lock);
1342
1343 return 0;
1344}
1345
/* Called at device open time to get the chip ready for
 * packet processing. Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp)
{
	u32 val;

	/* Full bring-up sequence: chip reset, PHY reset, then PHY
	 * configuration, in that order.
	 */
	b44_chip_reset(bp);
	b44_phy_reset(bp);
	b44_setup_phy(bp);

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too. */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	/* Point both DMA engines at the descriptor tables (bus address
	 * plus the SB PCI translation offset) and enable them.
	 */
	bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
	bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
	bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
			      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

	/* Hand the pre-filled RX descriptors to the hardware. */
	bw32(bp, B44_DMARX_PTR, bp->rx_pending);
	bp->rx_prod = bp->rx_pending;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
1384
/* ndo_open: allocate DMA resources, bring the hardware up, hook the
 * interrupt, and start the periodic link-check timer.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is torn down again.
 */
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		goto out;

	b44_init_rings(bp);
	b44_init_hw(bp);

	b44_check_phy(bp);

	/* IRQ is requested only after the chip is initialized; a shared
	 * interrupt arriving early would otherwise hit dead hardware.
	 */
	err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
	if (unlikely(err < 0)) {
		b44_chip_reset(bp);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	/* Periodic timer that polls PHY/link state once a second. */
	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}
1418
#if 0
/* Debug-only helper (compiled out): dumps a little device state to the
 * console.  Kept for ad-hoc debugging via the #if 0 in b44_close().
 */
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x] \n", val16);

}
#endif
1430
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	/* Run the ISR by hand with the line masked so it cannot nest. */
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev, NULL);
	enable_irq(dev->irq);
}
#endif
1443
/* ndo_stop: quiesce the queue and NAPI polling, kill the link timer,
 * halt the chip, release packets, the IRQ, and finally the DMA memory.
 * Teardown order mirrors b44_open in reverse.
 */
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Keep b44_poll from running while we tear things down. */
	netif_poll_disable(dev);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

#if 0
	b44_dump_state(bp);
#endif
	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	/* Re-arm polling for the next open. */
	netif_poll_enable(dev);

	b44_free_consistent(bp);

	return 0;
}
1473
1474static struct net_device_stats *b44_get_stats(struct net_device *dev)
1475{
1476 struct b44 *bp = netdev_priv(dev);
1477 struct net_device_stats *nstat = &bp->stats;
1478 struct b44_hw_stats *hwstat = &bp->hw_stats;
1479
1480 /* Convert HW stats into netdevice stats. */
1481 nstat->rx_packets = hwstat->rx_pkts;
1482 nstat->tx_packets = hwstat->tx_pkts;
1483 nstat->rx_bytes = hwstat->rx_octets;
1484 nstat->tx_bytes = hwstat->tx_octets;
1485 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1486 hwstat->tx_oversize_pkts +
1487 hwstat->tx_underruns +
1488 hwstat->tx_excessive_cols +
1489 hwstat->tx_late_cols);
1490 nstat->multicast = hwstat->tx_multicast_pkts;
1491 nstat->collisions = hwstat->tx_total_cols;
1492
1493 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1494 hwstat->rx_undersize);
1495 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1496 nstat->rx_frame_errors = hwstat->rx_align_errs;
1497 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1498 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1499 hwstat->rx_oversize_pkts +
1500 hwstat->rx_missed_pkts +
1501 hwstat->rx_crc_align_errs +
1502 hwstat->rx_undersize +
1503 hwstat->rx_crc_errs +
1504 hwstat->rx_align_errs +
1505 hwstat->rx_symbol_errs);
1506
1507 nstat->tx_aborted_errors = hwstat->tx_underruns;
1508#if 0
1509 /* Carrier lost counter seems to be broken for some devices */
1510 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1511#endif
1512
1513 return nstat;
1514}
1515
1516static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1517{
1518 struct dev_mc_list *mclist;
1519 int i, num_ents;
1520
1521 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1522 mclist = dev->mc_list;
1523 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1524 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1525 }
1526 return i+1;
1527}
1528
1529static void __b44_set_rx_mode(struct net_device *dev)
1530{
1531 struct b44 *bp = netdev_priv(dev);
1532 u32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
1534 val = br32(bp, B44_RXCONFIG);
1535 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1536 if (dev->flags & IFF_PROMISC) {
1537 val |= RXCONFIG_PROMISC;
1538 bw32(bp, B44_RXCONFIG, val);
1539 } else {
Francois Romieu874a6212005-11-07 01:50:46 +01001540 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1541 int i = 0;
1542
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 __b44_set_mac_addr(bp);
1544
1545 if (dev->flags & IFF_ALLMULTI)
1546 val |= RXCONFIG_ALLMULTI;
1547 else
Francois Romieu874a6212005-11-07 01:50:46 +01001548 i = __b44_load_mcast(bp, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
Francois Romieu874a6212005-11-07 01:50:46 +01001550 for (; i < 64; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 __b44_cam_write(bp, zero, i);
1552 }
1553 bw32(bp, B44_RXCONFIG, val);
1554 val = br32(bp, B44_CAM_CTRL);
1555 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1556 }
1557}
1558
1559static void b44_set_rx_mode(struct net_device *dev)
1560{
1561 struct b44 *bp = netdev_priv(dev);
1562
1563 spin_lock_irq(&bp->lock);
1564 __b44_set_rx_mode(dev);
1565 spin_unlock_irq(&bp->lock);
1566}
1567
1568static u32 b44_get_msglevel(struct net_device *dev)
1569{
1570 struct b44 *bp = netdev_priv(dev);
1571 return bp->msg_enable;
1572}
1573
1574static void b44_set_msglevel(struct net_device *dev, u32 value)
1575{
1576 struct b44 *bp = netdev_priv(dev);
1577 bp->msg_enable = value;
1578}
1579
1580static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1581{
1582 struct b44 *bp = netdev_priv(dev);
1583 struct pci_dev *pci_dev = bp->pdev;
1584
1585 strcpy (info->driver, DRV_MODULE_NAME);
1586 strcpy (info->version, DRV_MODULE_VERSION);
1587 strcpy (info->bus_info, pci_name(pci_dev));
1588}
1589
/* ethtool nway_reset: restart autonegotiation if it is enabled.
 * Returns 0 on success, -EINVAL if autoneg is off.
 */
static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	/* BMCR is read twice; presumably the first read flushes latched
	 * state so the second reflects current settings — TODO confirm
	 * against the PHY datasheet.
	 */
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}
1609
/* ethtool get_settings: report supported modes, the currently
 * advertised modes (reconstructed from bp->flags), and the current
 * speed/duplex/autoneg state.  Returns -EAGAIN while the interface is
 * down because link state is only tracked while running.
 */
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Half |
			  SUPPORTED_10baseT_Full |
			  SUPPORTED_MII);

	/* Translate the driver's ADV_* flag bits into ethtool
	 * ADVERTISED_* mask bits.
	 */
	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
1647
1648static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1649{
1650 struct b44 *bp = netdev_priv(dev);
1651
Francois Romieub9dcbb42005-11-08 23:36:20 +01001652 if (!netif_running(dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 return -EAGAIN;
1654
1655 /* We do not support gigabit. */
1656 if (cmd->autoneg == AUTONEG_ENABLE) {
1657 if (cmd->advertising &
1658 (ADVERTISED_1000baseT_Half |
1659 ADVERTISED_1000baseT_Full))
1660 return -EINVAL;
1661 } else if ((cmd->speed != SPEED_100 &&
1662 cmd->speed != SPEED_10) ||
1663 (cmd->duplex != DUPLEX_HALF &&
1664 cmd->duplex != DUPLEX_FULL)) {
1665 return -EINVAL;
1666 }
1667
1668 spin_lock_irq(&bp->lock);
1669
1670 if (cmd->autoneg == AUTONEG_ENABLE) {
1671 bp->flags &= ~B44_FLAG_FORCE_LINK;
1672 bp->flags &= ~(B44_FLAG_ADV_10HALF |
1673 B44_FLAG_ADV_10FULL |
1674 B44_FLAG_ADV_100HALF |
1675 B44_FLAG_ADV_100FULL);
1676 if (cmd->advertising & ADVERTISE_10HALF)
1677 bp->flags |= B44_FLAG_ADV_10HALF;
1678 if (cmd->advertising & ADVERTISE_10FULL)
1679 bp->flags |= B44_FLAG_ADV_10FULL;
1680 if (cmd->advertising & ADVERTISE_100HALF)
1681 bp->flags |= B44_FLAG_ADV_100HALF;
1682 if (cmd->advertising & ADVERTISE_100FULL)
1683 bp->flags |= B44_FLAG_ADV_100FULL;
1684 } else {
1685 bp->flags |= B44_FLAG_FORCE_LINK;
1686 if (cmd->speed == SPEED_100)
1687 bp->flags |= B44_FLAG_100_BASE_T;
1688 if (cmd->duplex == DUPLEX_FULL)
1689 bp->flags |= B44_FLAG_FULL_DUPLEX;
1690 }
1691
1692 b44_setup_phy(bp);
1693
1694 spin_unlock_irq(&bp->lock);
1695
1696 return 0;
1697}
1698
1699static void b44_get_ringparam(struct net_device *dev,
1700 struct ethtool_ringparam *ering)
1701{
1702 struct b44 *bp = netdev_priv(dev);
1703
1704 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1705 ering->rx_pending = bp->rx_pending;
1706
1707 /* XXX ethtool lacks a tx_max_pending, oops... */
1708}
1709
/* ethtool set_ringparam: validate and apply new RX/TX pending counts.
 * The chip is halted and fully re-initialized so the DMA engines pick
 * up the new ring occupancy.  Returns -EINVAL for out-of-range values
 * or unsupported mini/jumbo rings.
 */
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	/* Halt/re-init so the rings are rebuilt with the new sizes. */
	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	/* Interrupts are re-enabled outside the lock, as elsewhere. */
	b44_enable_ints(bp);

	return 0;
}
1736
1737static void b44_get_pauseparam(struct net_device *dev,
1738 struct ethtool_pauseparam *epause)
1739{
1740 struct b44 *bp = netdev_priv(dev);
1741
1742 epause->autoneg =
1743 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1744 epause->rx_pause =
1745 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1746 epause->tx_pause =
1747 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1748}
1749
1750static int b44_set_pauseparam(struct net_device *dev,
1751 struct ethtool_pauseparam *epause)
1752{
1753 struct b44 *bp = netdev_priv(dev);
1754
1755 spin_lock_irq(&bp->lock);
1756 if (epause->autoneg)
1757 bp->flags |= B44_FLAG_PAUSE_AUTO;
1758 else
1759 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1760 if (epause->rx_pause)
1761 bp->flags |= B44_FLAG_RX_PAUSE;
1762 else
1763 bp->flags &= ~B44_FLAG_RX_PAUSE;
1764 if (epause->tx_pause)
1765 bp->flags |= B44_FLAG_TX_PAUSE;
1766 else
1767 bp->flags &= ~B44_FLAG_TX_PAUSE;
1768 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1769 b44_halt(bp);
1770 b44_init_rings(bp);
1771 b44_init_hw(bp);
1772 } else {
1773 __b44_set_flow_ctrl(bp, bp->flags);
1774 }
1775 spin_unlock_irq(&bp->lock);
1776
1777 b44_enable_ints(bp);
1778
1779 return 0;
1780}
1781
Francois Romieu33539302005-11-07 01:51:34 +01001782static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1783{
1784 switch(stringset) {
1785 case ETH_SS_STATS:
1786 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1787 break;
1788 }
1789}
1790
1791static int b44_get_stats_count(struct net_device *dev)
1792{
1793 return ARRAY_SIZE(b44_gstrings);
1794}
1795
/* ethtool get_ethtool_stats: refresh the MIB counters and copy them
 * out in b44_gstrings order.
 *
 * NOTE(review): this walks b44_hw_stats as a flat array of u32
 * starting at tx_good_octets — it assumes the struct is a contiguous
 * run of u32 counters laid out in exactly b44_gstrings order; verify
 * against the struct definition in b44.h if either changes.
 */
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	/* Pull fresh counts from the hardware MIB before copying. */
	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}
1812
/* ethtool operations table wired up via SET_ETHTOOL_OPS() in
 * b44_init_one().
 */
static struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_stats_count	= b44_get_stats_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
1830
1831static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1832{
1833 struct mii_ioctl_data *data = if_mii(ifr);
1834 struct b44 *bp = netdev_priv(dev);
Francois Romieu34105722005-11-30 22:32:13 +01001835 int err = -EINVAL;
1836
1837 if (!netif_running(dev))
1838 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839
1840 spin_lock_irq(&bp->lock);
1841 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
1842 spin_unlock_irq(&bp->lock);
Francois Romieu34105722005-11-30 22:32:13 +01001843out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 return err;
1845}
1846
/* Read 128-bytes of EEPROM. */
/* The EEPROM contents are shadowed in register space at offset 4096;
 * copy them out 16 bits at a time.
 *
 * NOTE(review): readw() byte order vs. the EEPROM layout is assumed to
 * line up here (b44_get_invariants indexes bytes in swapped pairs) —
 * confirm on big-endian hosts.
 */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
{
	long i;
	u16 *ptr = (u16 *) data;

	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = readw(bp->regs + 4096 + i);

	return 0;
}
1858
/* Read chip-specific configuration out of the EEPROM at probe time:
 * MAC address, PHY address, RX offset, interrupt mask, core unit and
 * DMA translation offset.  Returns 0 on success or a negative errno.
 */
static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

	/* MAC address bytes are stored pair-swapped in the EEPROM. */
	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->phy_addr = eeprom[90] & 0x1f;

	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.
	 */
	bp->rx_offset = 30;

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = SB_PCI_DMA;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
         */
out:
	return err;
}
1900
/* PCI probe: enable the device, claim its MMIO BAR, configure the
 * 30-bit DMA masks the chip requires, allocate and populate the
 * netdevice, map registers, read invariants from EEPROM, and register
 * with the network stack.  Error paths unwind in reverse order via the
 * goto ladder at the bottom.
 */
static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	/* Print the banner once, on the first probed device. */
	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* The chip can only address 30 bits (B44_DMA_MASK), for both
	 * streaming and coherent mappings.
	 */
	err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev,&pdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(b44reg_base, b44reg_len);
	if (bp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	/* Wire up the netdevice operations. */
	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;
	dev->watchdog_timeo = B44_TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = b44_poll_controller;
#endif
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = b44_get_invariants(bp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	pci_save_state(bp->pdev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp);

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;

err_out_iounmap:
	iounmap(bp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
2063
/* PCI remove: unregister from the network stack first (stops new
 * activity), then release mappings, memory and PCI resources in
 * reverse order of acquisition in b44_init_one().
 */
static void __devexit b44_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(bp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
2076
/* PM suspend: if the interface is up, stop the link timer, halt the
 * chip, detach the netdevice and drop all ring buffers, then release
 * the IRQ and power down the PCI function.  b44_resume() reverses
 * this.
 */
static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		 return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	/* IRQ is freed so a shared line cannot fire into dead hardware;
	 * it is re-requested in b44_resume().
	 */
	free_irq(dev->irq, dev);
	pci_disable_device(pdev);
	return 0;
}
2100
2101static int b44_resume(struct pci_dev *pdev)
2102{
2103 struct net_device *dev = pci_get_drvdata(pdev);
2104 struct b44 *bp = netdev_priv(dev);
2105
2106 pci_restore_state(pdev);
David Shaohua Lid58da592005-03-18 16:43:54 -05002107 pci_enable_device(pdev);
2108 pci_set_master(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109
2110 if (!netif_running(dev))
2111 return 0;
2112
Pavel Machek46e17852005-10-28 15:14:47 -07002113 if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2114 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2115
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 spin_lock_irq(&bp->lock);
2117
2118 b44_init_rings(bp);
2119 b44_init_hw(bp);
2120 netif_device_attach(bp->dev);
2121 spin_unlock_irq(&bp->lock);
2122
2123 bp->timer.expires = jiffies + HZ;
2124 add_timer(&bp->timer);
2125
2126 b44_enable_ints(bp);
Mark Lordd9e2d182005-11-30 22:30:23 +01002127 netif_wake_queue(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 return 0;
2129}
2130
/* PCI driver glue: probe/remove plus power-management entry points. */
static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
2139
/* Module init: compute the cache-line-derived alignment mask and sync
 * size used when flushing the RX/TX descriptor tables, then register
 * the PCI driver.
 */
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	/* Never sync less than one full descriptor. */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	return pci_module_init(&b44_driver);
}
2150
/* Module exit: unregister the PCI driver (triggers b44_remove_one for
 * each bound device).
 */
static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);
2158