/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */
#define RX_RING_SIZE		512	/* must be power of 2 */
#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)

#define TX_RING_SIZE		128	/* must be power of 2 */
#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
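/* With TX_RING_SIZE = 128 this threshold works out to 96 occupied descriptors. */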

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
				 | MACB_BIT(ISR_RLE)			\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/*
 * Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
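/*
 * Likely derivation (assumption): 1538 bytes (max frame plus preamble and
 * inter-frame gap) * 8 bits / 10 Mbit/s is roughly 1230 us.
 */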
#define MACB_HALT_TIMEOUT	1230

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(unsigned int index)
{
	return index & (TX_RING_SIZE - 1);
}
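/*
 * The mask works as a modulo because TX_RING_SIZE (and RX_RING_SIZE in
 * macb_rx_ring_wrap() below) is a power of two.
 */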

static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
{
	return &bp->tx_ring[macb_tx_ring_wrap(index)];
}

static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
{
	return &bp->tx_skb[macb_tx_ring_wrap(index)];
}

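/* Return the DMA (bus) address of the TX descriptor at @index. */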
static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);

	return bp->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(unsigned int index)
{
	return index & (RX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	return &bp->rx_ring[macb_rx_ring_wrap(index)];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
}

void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}
EXPORT_SYMBOL_GPL(macb_set_hwaddr);

void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = bp->pdev->dev.platform_data;

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	netdev_info(bp->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
EXPORT_SYMBOL_GPL(macb_get_hwaddr);

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}

static int macb_mdio_reset(struct mii_bus *bus)
{
	return 0;
}

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	phydev = phy_find_first(bp->mii_bus);
	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -1;
	}

	pdata = dev_get_platdata(&bp->pdev->dev);
	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
		if (!ret) {
			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
		}
	}

	/* attach the mac to the phy */
	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
				 bp->phy_interface);
	if (ret) {
		netdev_err(dev, "Could not attach to PHY\n");
		return ret;
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp))
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}

int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	bp->mii_bus->reset = &macb_mdio_reset;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->dev->dev;
	pdata = bp->pdev->dev.platform_data;

	if (pdata)
		bp->mii_bus->phy_mask = pdata->phy_mask;

	bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		bp->mii_bus->irq[i] = PHY_POLL;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	if (mdiobus_register(bp->mii_bus))
		goto err_out_free_mdio_irq;

	if (macb_mii_probe(bp->dev) != 0) {
		goto err_out_unregister_bus;
	}

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(macb_mii_init);

static void macb_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + MACB_PFR;
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, reg++)
		*p += __raw_readl(reg);
}

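/*
 * Request a transmit halt and wait up to MACB_HALT_TIMEOUT for the
 * controller to finish the frame in progress.
 */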
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

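/*
 * Workqueue handler scheduled on TX errors: halt the transmitter, reclaim
 * all queued buffers, reset the TX ring and restart the queue.
 */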
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb *bp = container_of(work, struct macb, tx_error_task);
	struct macb_tx_skb *tx_skb;
	struct sk_buff *skb;
	unsigned int tail;

	netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
		    bp->tx_tail, bp->tx_head);

	/* Make sure nobody is trying to queue up new packets */
	netif_stop_queue(bp->dev);

	/*
	 * Stop transmission now
	 * (in case we have just queued new packets)
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* No need for the lock here as nobody will interrupt us anymore */

	/*
	 * Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(bp, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
				    macb_tx_ring_wrap(tail), skb->data);
			bp->stats.tx_packets++;
			bp->stats.tx_bytes += skb->len;
		} else {
			/*
			 * "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about those.
			 * Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
				 DMA_TO_DEVICE);
		tx_skb->skb = NULL;
		dev_kfree_skb(skb);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	macb_writel(bp, TBQP, bp->tx_ring_dma);
	/* Make TX ring reflect state of hardware */
	bp->tx_head = bp->tx_tail = 0;

	/* Now we are ready to start transmission again */
	netif_wake_queue(bp->dev);

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
}

static void macb_tx_interrupt(struct macb *bp)
{
	unsigned int tail;
	unsigned int head;
	u32 status;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		macb_writel(bp, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = bp->tx_head;
	for (tail = bp->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(bp, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
			    macb_tx_ring_wrap(tail), skb->data);
		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
				 DMA_TO_DEVICE);
		bp->stats.tx_packets++;
		bp->stats.tx_bytes += skb->len;
		tx_skb->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_tail = tail;
	if (netif_queue_stopped(bp->dev)
			&& CIRC_CNT(bp->tx_head, bp->tx_tail,
				    TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);
}

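/*
 * GEM only: top up the RX ring with freshly allocated sk_buffs and matching
 * DMA mappings for entries that do not yet own a buffer.
 */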
static void gem_rx_refill(struct macb *bp)
{
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	dma_addr_t paddr;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
		u32 addr, ctrl;

		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
		desc = &bp->rx_ring[entry];

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;
		bp->rx_prepared_head++;

		if ((addr & MACB_BIT(RX_USED)))
			continue;

		if (bp->rx_skbuff[entry] == NULL) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(skb == NULL)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}
			bp->rx_skbuff[entry] = skb;

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size, DMA_FROM_DEVICE);

			if (entry == RX_RING_SIZE - 1)
				paddr |= MACB_BIT(RX_WRAP);
			bp->rx_ring[entry].addr = paddr;
			bp->rx_ring[entry].ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
		    bp->rx_prepared_head, bp->rx_tail);
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/*
	 * When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to record
	 * anything.
	 */
}

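/*
 * GEM receive: every descriptor carries a complete frame in its own sk_buff,
 * so packets reach the stack without copying. Returns the number of frames
 * processed, at most @budget.
 */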
static int gem_rx(struct macb *bp, int budget)
{
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 addr, ctrl;

		entry = macb_rx_ring_wrap(bp->rx_tail);
		desc = &bp->rx_ring[entry];

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		desc->addr &= ~MACB_BIT(RX_USED);
		bp->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = MACB_BFEXT(RX_FRMLEN, ctrl);

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
		dma_unmap_single(&bp->pdev->dev, addr,
				 len, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);

		bp->stats.rx_packets++;
		bp->stats.rx_bytes += skb->len;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->mac_header, 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(bp);

	return count;
}

static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(first_frag),
		    macb_rx_ring_wrap(last_frag), len);

	/*
	 * The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
				macb_rx_buffer(bp, frag), frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}

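/*
 * Classic MACB receive: a frame can span several small buffers, so completed
 * frames are assembled by copying in macb_rx_frame(). Returns the number of
 * frames handed to the stack.
 */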
static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 addr, ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;
			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}

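/*
 * NAPI poll: hand up to @budget received frames to the stack via
 * bp->macbgem_ops.mog_rx, then re-enable RX interrupts once the ring is
 * drained.
 */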
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	work_done = 0;

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(bp, budget);
	if (work_done < budget) {
		napi_complete(napi);

		/*
		 * We've done what we can to clean the buffers. Make sure we
		 * get notified when new packets arrive.
		 */
		macb_writel(bp, IER, MACB_RX_INT_FLAGS);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (unlikely(status))
			napi_reschedule(napi);
	}

	/* TODO: Handle errors */

	return work_done;
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *bp = netdev_priv(dev);
	u32 status;

	status = macb_readl(bp, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			macb_writel(bp, IDR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&bp->tx_error_task);
			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(bp);

		/*
		 * Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;
		}

		if (status & MACB_BIT(HRESP)) {
			/*
			 * TODO: Reset the hardware, and maybe move the
			 * netdev_err to a lower-priority context as well
			 * (work queue?)
			 */
			netdev_err(dev, "DMA bus error: HRESP not OK\n");
		}

		status = macb_readl(bp, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	macb_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

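/*
 * ndo_start_xmit: map the skb for DMA, fill in the next TX descriptor and
 * start the transmitter; the queue is stopped when the ring is full.
 */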
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	dma_addr_t mapping;
	unsigned int len, entry;
	struct macb_dma_desc *desc;
	struct macb_tx_skb *tx_skb;
	u32 ctrl;
	unsigned long flags;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: len %u head %p data %p tail %p end %p\n",
		    skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   bp->tx_head, bp->tx_tail);
		return NETDEV_TX_BUSY;
	}

	entry = macb_tx_ring_wrap(bp->tx_head);
	bp->tx_head++;
	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 len, DMA_TO_DEVICE);

	tx_skb = &bp->tx_skb[entry];
	tx_skb->skb = skb;
	tx_skb->mapping = mapping;
	netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
		    skb->data, (unsigned long)mapping);

	ctrl = MACB_BF(TX_FRMLEN, len);
	ctrl |= MACB_BIT(TX_LAST);
	if (entry == (TX_RING_SIZE - 1))
		ctrl |= MACB_BIT(TX_WRAP);

	desc = &bp->tx_ring[entry];
	desc->addr = mapping;
	desc->ctrl = ctrl;

	/* Make newly initialized descriptor visible to hardware */
	wmb();

	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

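/*
 * Pick the RX buffer size: fixed 128 bytes on MACB, or the requested size
 * rounded up to a multiple of RX_BUFFER_MULTIPLE (64 bytes) on GEM.
 */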
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1002 | static void macb_init_rx_buffer_size(struct macb *bp, size_t size) |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1003 | { |
| 1004 | if (!macb_is_gem(bp)) { |
| 1005 | bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; |
| 1006 | } else { |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1007 | bp->rx_buffer_size = size; |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1008 | |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1009 | if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1010 | netdev_dbg(bp->dev, |
| 1011 | "RX buffer must be multiple of %d bytes, expanding\n", |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1012 | RX_BUFFER_MULTIPLE); |
| 1013 | bp->rx_buffer_size = |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1014 | roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1015 | } |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1016 | } |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1017 | |
| 1018 | netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n", |
| 1019 | bp->dev->mtu, bp->rx_buffer_size); |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1020 | } |
| 1021 | |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1022 | static void gem_free_rx_buffers(struct macb *bp) |
| 1023 | { |
| 1024 | struct sk_buff *skb; |
| 1025 | struct macb_dma_desc *desc; |
| 1026 | dma_addr_t addr; |
| 1027 | int i; |
| 1028 | |
| 1029 | if (!bp->rx_skbuff) |
| 1030 | return; |
| 1031 | |
| 1032 | for (i = 0; i < RX_RING_SIZE; i++) { |
| 1033 | skb = bp->rx_skbuff[i]; |
| 1034 | |
| 1035 | if (skb == NULL) |
| 1036 | continue; |
| 1037 | |
| 1038 | desc = &bp->rx_ring[i]; |
| 1039 | addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); |
| 1040 | dma_unmap_single(&bp->pdev->dev, addr, skb->len, |
| 1041 | DMA_FROM_DEVICE); |
| 1042 | dev_kfree_skb_any(skb); |
 | 1043 | bp->rx_skbuff[i] = NULL; |
| 1044 | } |
| 1045 | |
| 1046 | kfree(bp->rx_skbuff); |
| 1047 | bp->rx_skbuff = NULL; |
| 1048 | } |
| 1049 | |
| 1050 | static void macb_free_rx_buffers(struct macb *bp) |
| 1051 | { |
| 1052 | if (bp->rx_buffers) { |
| 1053 | dma_free_coherent(&bp->pdev->dev, |
| 1054 | RX_RING_SIZE * bp->rx_buffer_size, |
| 1055 | bp->rx_buffers, bp->rx_buffers_dma); |
| 1056 | bp->rx_buffers = NULL; |
| 1057 | } |
| 1058 | } |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1059 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1060 | static void macb_free_consistent(struct macb *bp) |
| 1061 | { |
| 1062 | if (bp->tx_skb) { |
| 1063 | kfree(bp->tx_skb); |
| 1064 | bp->tx_skb = NULL; |
| 1065 | } |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1066 | bp->macbgem_ops.mog_free_rx_buffers(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1067 | if (bp->rx_ring) { |
| 1068 | dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, |
| 1069 | bp->rx_ring, bp->rx_ring_dma); |
| 1070 | bp->rx_ring = NULL; |
| 1071 | } |
| 1072 | if (bp->tx_ring) { |
| 1073 | dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, |
| 1074 | bp->tx_ring, bp->tx_ring_dma); |
| 1075 | bp->tx_ring = NULL; |
| 1076 | } |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1077 | } |
| 1078 | |
| 1079 | static int gem_alloc_rx_buffers(struct macb *bp) |
| 1080 | { |
| 1081 | int size; |
| 1082 | |
| 1083 | size = RX_RING_SIZE * sizeof(struct sk_buff *); |
| 1084 | bp->rx_skbuff = kzalloc(size, GFP_KERNEL); |
| 1085 | if (!bp->rx_skbuff) |
| 1086 | return -ENOMEM; |
| 1087 | else |
| 1088 | netdev_dbg(bp->dev, |
| 1089 | "Allocated %d RX struct sk_buff entries at %p\n", |
| 1090 | RX_RING_SIZE, bp->rx_skbuff); |
| 1091 | return 0; |
| 1092 | } |
| 1093 | |
| 1094 | static int macb_alloc_rx_buffers(struct macb *bp) |
| 1095 | { |
| 1096 | int size; |
| 1097 | |
| 1098 | size = RX_RING_SIZE * bp->rx_buffer_size; |
| 1099 | bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, |
| 1100 | &bp->rx_buffers_dma, GFP_KERNEL); |
| 1101 | if (!bp->rx_buffers) |
| 1102 | return -ENOMEM; |
| 1103 | else |
| 1104 | netdev_dbg(bp->dev, |
| 1105 | "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", |
| 1106 | size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); |
| 1107 | return 0; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1108 | } |
| 1109 | |
| 1110 | static int macb_alloc_consistent(struct macb *bp) |
| 1111 | { |
| 1112 | int size; |
| 1113 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 1114 | size = TX_RING_SIZE * sizeof(struct macb_tx_skb); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1115 | bp->tx_skb = kmalloc(size, GFP_KERNEL); |
| 1116 | if (!bp->tx_skb) |
| 1117 | goto out_err; |
| 1118 | |
| 1119 | size = RX_RING_BYTES; |
| 1120 | bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
| 1121 | &bp->rx_ring_dma, GFP_KERNEL); |
| 1122 | if (!bp->rx_ring) |
| 1123 | goto out_err; |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1124 | netdev_dbg(bp->dev, |
| 1125 | "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", |
| 1126 | size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1127 | |
| 1128 | size = TX_RING_BYTES; |
| 1129 | bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
| 1130 | &bp->tx_ring_dma, GFP_KERNEL); |
| 1131 | if (!bp->tx_ring) |
| 1132 | goto out_err; |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1133 | netdev_dbg(bp->dev, |
| 1134 | "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", |
| 1135 | size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1136 | |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1137 | if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1138 | goto out_err; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1139 | |
| 1140 | return 0; |
| 1141 | |
| 1142 | out_err: |
| 1143 | macb_free_consistent(bp); |
| 1144 | return -ENOMEM; |
| 1145 | } |
| 1146 | |
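/*
 * Note on the two ring initialisation helpers below: every TX descriptor
 * starts out with TX_USED set (owned by software, nothing queued), while
 * RX descriptors are handed to the controller (RX_USED clear). The
 * TX_WRAP/RX_WRAP bit on the last entry tells the DMA engine to wrap back
 * to the start of the ring.
 */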
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1147 | static void gem_init_rings(struct macb *bp) |
| 1148 | { |
| 1149 | int i; |
| 1150 | |
| 1151 | for (i = 0; i < TX_RING_SIZE; i++) { |
| 1152 | bp->tx_ring[i].addr = 0; |
| 1153 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); |
| 1154 | } |
| 1155 | bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); |
| 1156 | |
| 1157 | bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0; |
| 1158 | |
| 1159 | gem_rx_refill(bp); |
| 1160 | } |
| 1161 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1162 | static void macb_init_rings(struct macb *bp) |
| 1163 | { |
| 1164 | int i; |
| 1165 | dma_addr_t addr; |
| 1166 | |
| 1167 | addr = bp->rx_buffers_dma; |
| 1168 | for (i = 0; i < RX_RING_SIZE; i++) { |
| 1169 | bp->rx_ring[i].addr = addr; |
| 1170 | bp->rx_ring[i].ctrl = 0; |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1171 | addr += bp->rx_buffer_size; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1172 | } |
| 1173 | bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); |
| 1174 | |
| 1175 | for (i = 0; i < TX_RING_SIZE; i++) { |
| 1176 | bp->tx_ring[i].addr = 0; |
| 1177 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); |
| 1178 | } |
| 1179 | bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); |
| 1180 | |
| 1181 | bp->rx_tail = bp->tx_head = bp->tx_tail = 0; |
| 1182 | } |
| 1183 | |
| 1184 | static void macb_reset_hw(struct macb *bp) |
| 1185 | { |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1186 | /* |
| 1187 | * Disable RX and TX (XXX: Should we halt the transmission |
| 1188 | * more gracefully?) |
| 1189 | */ |
| 1190 | macb_writel(bp, NCR, 0); |
| 1191 | |
| 1192 | /* Clear the stats registers (XXX: Update stats first?) */ |
| 1193 | macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); |
| 1194 | |
| 1195 | /* Clear all status flags */ |
Joachim Eastwood | 95ebcea | 2012-10-22 08:45:31 +0000 | [diff] [blame] | 1196 | macb_writel(bp, TSR, -1); |
| 1197 | macb_writel(bp, RSR, -1); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1198 | |
| 1199 | /* Disable all interrupts */ |
Joachim Eastwood | 95ebcea | 2012-10-22 08:45:31 +0000 | [diff] [blame] | 1200 | macb_writel(bp, IDR, -1); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1201 | macb_readl(bp, ISR); |
| 1202 | } |
| 1203 | |
Jamie Iles | 70c9f3d | 2011-03-09 16:22:54 +0000 | [diff] [blame] | 1204 | static u32 gem_mdc_clk_div(struct macb *bp) |
| 1205 | { |
| 1206 | u32 config; |
| 1207 | unsigned long pclk_hz = clk_get_rate(bp->pclk); |
| 1208 | |
| 1209 | if (pclk_hz <= 20000000) |
| 1210 | config = GEM_BF(CLK, GEM_CLK_DIV8); |
| 1211 | else if (pclk_hz <= 40000000) |
| 1212 | config = GEM_BF(CLK, GEM_CLK_DIV16); |
| 1213 | else if (pclk_hz <= 80000000) |
| 1214 | config = GEM_BF(CLK, GEM_CLK_DIV32); |
| 1215 | else if (pclk_hz <= 120000000) |
| 1216 | config = GEM_BF(CLK, GEM_CLK_DIV48); |
| 1217 | else if (pclk_hz <= 160000000) |
| 1218 | config = GEM_BF(CLK, GEM_CLK_DIV64); |
| 1219 | else |
| 1220 | config = GEM_BF(CLK, GEM_CLK_DIV96); |
| 1221 | |
| 1222 | return config; |
| 1223 | } |
| 1224 | |
| 1225 | static u32 macb_mdc_clk_div(struct macb *bp) |
| 1226 | { |
| 1227 | u32 config; |
| 1228 | unsigned long pclk_hz; |
| 1229 | |
| 1230 | if (macb_is_gem(bp)) |
| 1231 | return gem_mdc_clk_div(bp); |
| 1232 | |
| 1233 | pclk_hz = clk_get_rate(bp->pclk); |
| 1234 | if (pclk_hz <= 20000000) |
| 1235 | config = MACB_BF(CLK, MACB_CLK_DIV8); |
| 1236 | else if (pclk_hz <= 40000000) |
| 1237 | config = MACB_BF(CLK, MACB_CLK_DIV16); |
| 1238 | else if (pclk_hz <= 80000000) |
| 1239 | config = MACB_BF(CLK, MACB_CLK_DIV32); |
| 1240 | else |
| 1241 | config = MACB_BF(CLK, MACB_CLK_DIV64); |
| 1242 | |
| 1243 | return config; |
| 1244 | } |
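/*
 * Example (illustrative only): with a 133 MHz pclk, both helpers above
 * select a divide-by-64 ratio, giving an MDC clock of roughly 2.08 MHz,
 * which stays below the 2.5 MHz maximum allowed by IEEE 802.3 clause 22.
 */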
| 1245 | |
Jamie Iles | 757a03c | 2011-03-09 16:29:59 +0000 | [diff] [blame] | 1246 | /* |
| 1247 | * Get the DMA bus width field of the network configuration register that we |
| 1248 | * should program. We find the width from decoding the design configuration |
| 1249 | * register to find the maximum supported data bus width. |
| 1250 | */ |
| 1251 | static u32 macb_dbw(struct macb *bp) |
| 1252 | { |
| 1253 | if (!macb_is_gem(bp)) |
| 1254 | return 0; |
| 1255 | |
| 1256 | switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { |
| 1257 | case 4: |
| 1258 | return GEM_BF(DBW, GEM_DBW128); |
| 1259 | case 2: |
| 1260 | return GEM_BF(DBW, GEM_DBW64); |
| 1261 | case 1: |
| 1262 | default: |
| 1263 | return GEM_BF(DBW, GEM_DBW32); |
| 1264 | } |
| 1265 | } |
| 1266 | |
Jamie Iles | 0116da4 | 2011-03-14 17:38:30 +0000 | [diff] [blame] | 1267 | /* |
Nicolas Ferre | b3e3bd71 | 2012-11-23 03:49:01 +0000 | [diff] [blame] | 1268 | * Configure the receive DMA engine |
| 1269 | * - use the correct receive buffer size |
 | 1270 | * - allow the use of INCR16 burst transfers |
 | 1271 | * (if not supported by the FIFO, the controller falls back to the default) |
| 1272 | * - set both rx/tx packet buffers to full memory size |
| 1273 | * These are configurable parameters for GEM. |
Jamie Iles | 0116da4 | 2011-03-14 17:38:30 +0000 | [diff] [blame] | 1274 | */ |
| 1275 | static void macb_configure_dma(struct macb *bp) |
| 1276 | { |
| 1277 | u32 dmacfg; |
| 1278 | |
| 1279 | if (macb_is_gem(bp)) { |
| 1280 | dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1281 | dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); |
Nicolas Ferre | b3e3bd71 | 2012-11-23 03:49:01 +0000 | [diff] [blame] | 1282 | dmacfg |= GEM_BF(FBLDO, 16); |
| 1283 | dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); |
Steffen Trumtrar | a1ae385 | 2013-03-27 23:07:06 +0000 | [diff] [blame] | 1284 | dmacfg &= ~GEM_BIT(ENDIA); |
Jamie Iles | 0116da4 | 2011-03-14 17:38:30 +0000 | [diff] [blame] | 1285 | gem_writel(bp, DMACFG, dmacfg); |
| 1286 | } |
| 1287 | } |
| 1288 | |
Nicolas Ferre | 581df9e | 2013-05-14 03:00:16 +0000 | [diff] [blame] | 1289 | /* |
 | 1290 | * Configure peripheral capabilities according to the integration options used |
| 1291 | */ |
| 1292 | static void macb_configure_caps(struct macb *bp) |
| 1293 | { |
| 1294 | if (macb_is_gem(bp)) { |
 | 1295 | if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0) |
| 1296 | bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; |
| 1297 | } |
| 1298 | } |
| 1299 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1300 | static void macb_init_hw(struct macb *bp) |
| 1301 | { |
| 1302 | u32 config; |
| 1303 | |
| 1304 | macb_reset_hw(bp); |
Joachim Eastwood | 314bccc | 2012-11-07 08:14:52 +0000 | [diff] [blame] | 1305 | macb_set_hwaddr(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1306 | |
Jamie Iles | 70c9f3d | 2011-03-09 16:22:54 +0000 | [diff] [blame] | 1307 | config = macb_mdc_clk_div(bp); |
Havard Skinnemoen | 29bc2e1 | 2012-10-31 06:04:58 +0000 | [diff] [blame] | 1308 | config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Offset RX data so the IP header is word-aligned */ |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1309 | config |= MACB_BIT(PAE); /* PAuse Enable */ |
| 1310 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ |
Peter Korsgaard | 8dd4bd0 | 2010-04-07 21:53:41 -0700 | [diff] [blame] | 1311 | config |= MACB_BIT(BIG); /* Receive oversized frames */ |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1312 | if (bp->dev->flags & IFF_PROMISC) |
| 1313 | config |= MACB_BIT(CAF); /* Copy All Frames */ |
| 1314 | if (!(bp->dev->flags & IFF_BROADCAST)) |
| 1315 | config |= MACB_BIT(NBC); /* No BroadCast */ |
Jamie Iles | 757a03c | 2011-03-09 16:29:59 +0000 | [diff] [blame] | 1316 | config |= macb_dbw(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1317 | macb_writel(bp, NCFGR, config); |
Vitalii Demianets | 26cdfb4 | 2012-11-02 07:09:24 +0000 | [diff] [blame] | 1318 | bp->speed = SPEED_10; |
| 1319 | bp->duplex = DUPLEX_HALF; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1320 | |
Jamie Iles | 0116da4 | 2011-03-14 17:38:30 +0000 | [diff] [blame] | 1321 | macb_configure_dma(bp); |
Nicolas Ferre | 581df9e | 2013-05-14 03:00:16 +0000 | [diff] [blame] | 1322 | macb_configure_caps(bp); |
Jamie Iles | 0116da4 | 2011-03-14 17:38:30 +0000 | [diff] [blame] | 1323 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1324 | /* Initialize TX and RX descriptor queue base pointers */ |
| 1325 | macb_writel(bp, RBQP, bp->rx_ring_dma); |
| 1326 | macb_writel(bp, TBQP, bp->tx_ring_dma); |
| 1327 | |
| 1328 | /* Enable TX and RX */ |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1329 | macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1330 | |
| 1331 | /* Enable interrupts */ |
Nicolas Ferre | e86cd53 | 2012-10-31 06:04:57 +0000 | [diff] [blame] | 1332 | macb_writel(bp, IER, (MACB_RX_INT_FLAGS |
| 1333 | | MACB_TX_INT_FLAGS |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1334 | | MACB_BIT(HRESP))); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1335 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1336 | } |
| 1337 | |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1338 | /* |
| 1339 | * The hash address register is 64 bits long and takes up two |
| 1340 | * locations in the memory map. The least significant bits are stored |
| 1341 | * in EMAC_HSL and the most significant bits in EMAC_HSH. |
| 1342 | * |
| 1343 | * The unicast hash enable and the multicast hash enable bits in the |
| 1344 | * network configuration register enable the reception of hash matched |
| 1345 | * frames. The destination address is reduced to a 6 bit index into |
| 1346 | * the 64 bit hash register using the following hash function. The |
| 1347 | * hash function is an exclusive or of every sixth bit of the |
| 1348 | * destination address. |
| 1349 | * |
| 1350 | * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] |
| 1351 | * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] |
| 1352 | * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] |
| 1353 | * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] |
| 1354 | * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] |
| 1355 | * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] |
| 1356 | * |
| 1357 | * da[0] represents the least significant bit of the first byte |
| 1358 | * received, that is, the multicast/unicast indicator, and da[47] |
| 1359 | * represents the most significant bit of the last byte received. If |
| 1360 | * the hash index, hi[n], points to a bit that is set in the hash |
| 1361 | * register then the frame will be matched according to whether the |
| 1362 | * frame is multicast or unicast. A multicast match will be signalled |
| 1363 | * if the multicast hash enable bit is set, da[0] is 1 and the hash |
| 1364 | * index points to a bit set in the hash register. A unicast match |
| 1365 | * will be signalled if the unicast hash enable bit is set, da[0] is 0 |
| 1366 | * and the hash index points to a bit set in the hash register. To |
| 1367 | * receive all multicast frames, the hash register should be set with |
| 1368 | * all ones and the multicast hash enable bit should be set in the |
| 1369 | * network configuration register. |
| 1370 | */ |
| 1371 | |
| 1372 | static inline int hash_bit_value(int bitnr, __u8 *addr) |
| 1373 | { |
| 1374 | if (addr[bitnr / 8] & (1 << (bitnr % 8))) |
| 1375 | return 1; |
| 1376 | return 0; |
| 1377 | } |
| 1378 | |
| 1379 | /* |
| 1380 | * Return the hash index value for the specified address. |
| 1381 | */ |
| 1382 | static int hash_get_index(__u8 *addr) |
| 1383 | { |
| 1384 | int i, j, bitval; |
| 1385 | int hash_index = 0; |
| 1386 | |
| 1387 | for (j = 0; j < 6; j++) { |
| 1388 | for (i = 0, bitval = 0; i < 8; i++) |
| 1389 | bitval ^= hash_bit_value(i*6 + j, addr); |
| 1390 | |
| 1391 | hash_index |= (bitval << j); |
| 1392 | } |
| 1393 | |
| 1394 | return hash_index; |
| 1395 | } |
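/*
 * Illustrative example (not part of the driver): for the IPv4 multicast
 * MAC address 01:00:5e:00:00:01, hash_get_index() above returns 38
 * (hi[5..0] = 100110b), so macb_sethashtable() below ends up setting
 * bit 6 of mc_filter[1], i.e. bit 6 of the hash register top (HRT) word.
 */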
| 1396 | |
| 1397 | /* |
| 1398 | * Add multicast addresses to the internal multicast-hash table. |
| 1399 | */ |
| 1400 | static void macb_sethashtable(struct net_device *dev) |
| 1401 | { |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 1402 | struct netdev_hw_addr *ha; |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1403 | unsigned long mc_filter[2]; |
Jiri Pirko | f9dcbcc | 2010-02-23 09:19:49 +0000 | [diff] [blame] | 1404 | unsigned int bitnr; |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1405 | struct macb *bp = netdev_priv(dev); |
| 1406 | |
| 1407 | mc_filter[0] = mc_filter[1] = 0; |
| 1408 | |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 1409 | netdev_for_each_mc_addr(ha, dev) { |
| 1410 | bitnr = hash_get_index(ha->addr); |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1411 | mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); |
| 1412 | } |
| 1413 | |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1414 | macb_or_gem_writel(bp, HRB, mc_filter[0]); |
| 1415 | macb_or_gem_writel(bp, HRT, mc_filter[1]); |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1416 | } |
| 1417 | |
| 1418 | /* |
| 1419 | * Enable/Disable promiscuous and multicast modes. |
| 1420 | */ |
Joachim Eastwood | e0da1f1 | 2012-10-18 11:01:15 +0000 | [diff] [blame] | 1421 | void macb_set_rx_mode(struct net_device *dev) |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1422 | { |
| 1423 | unsigned long cfg; |
| 1424 | struct macb *bp = netdev_priv(dev); |
| 1425 | |
| 1426 | cfg = macb_readl(bp, NCFGR); |
| 1427 | |
| 1428 | if (dev->flags & IFF_PROMISC) |
| 1429 | /* Enable promiscuous mode */ |
| 1430 | cfg |= MACB_BIT(CAF); |
| 1431 | else if (dev->flags & (~IFF_PROMISC)) |
| 1432 | /* Disable promiscuous mode */ |
| 1433 | cfg &= ~MACB_BIT(CAF); |
| 1434 | |
| 1435 | if (dev->flags & IFF_ALLMULTI) { |
| 1436 | /* Enable all multicast mode */ |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1437 | macb_or_gem_writel(bp, HRB, -1); |
| 1438 | macb_or_gem_writel(bp, HRT, -1); |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1439 | cfg |= MACB_BIT(NCFGR_MTI); |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1440 | } else if (!netdev_mc_empty(dev)) { |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1441 | /* Enable specific multicasts */ |
| 1442 | macb_sethashtable(dev); |
| 1443 | cfg |= MACB_BIT(NCFGR_MTI); |
| 1444 | } else if (dev->flags & (~IFF_ALLMULTI)) { |
| 1445 | /* Disable all multicast mode */ |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1446 | macb_or_gem_writel(bp, HRB, 0); |
| 1447 | macb_or_gem_writel(bp, HRT, 0); |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1448 | cfg &= ~MACB_BIT(NCFGR_MTI); |
| 1449 | } |
| 1450 | |
| 1451 | macb_writel(bp, NCFGR, cfg); |
| 1452 | } |
Joachim Eastwood | e0da1f1 | 2012-10-18 11:01:15 +0000 | [diff] [blame] | 1453 | EXPORT_SYMBOL_GPL(macb_set_rx_mode); |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1454 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1455 | static int macb_open(struct net_device *dev) |
| 1456 | { |
| 1457 | struct macb *bp = netdev_priv(dev); |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1458 | size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1459 | int err; |
| 1460 | |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1461 | netdev_dbg(bp->dev, "open\n"); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1462 | |
Nicolas Ferre | 03fc472 | 2012-07-03 23:14:13 +0000 | [diff] [blame] | 1463 | /* carrier starts down */ |
| 1464 | netif_carrier_off(dev); |
| 1465 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1466 | /* if the phy is not yet registered, retry later */ |
| 1467 | if (!bp->phy_dev) |
| 1468 | return -EAGAIN; |
| 1469 | |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1470 | /* RX buffer size initialization */ |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1471 | macb_init_rx_buffer_size(bp, bufsz); |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1472 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1473 | err = macb_alloc_consistent(bp); |
| 1474 | if (err) { |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1475 | netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", |
| 1476 | err); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1477 | return err; |
| 1478 | } |
| 1479 | |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1480 | napi_enable(&bp->napi); |
| 1481 | |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1482 | bp->macbgem_ops.mog_init_rings(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1483 | macb_init_hw(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1484 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1485 | /* schedule a link state check */ |
| 1486 | phy_start(bp->phy_dev); |
| 1487 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1488 | netif_start_queue(dev); |
| 1489 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1490 | return 0; |
| 1491 | } |
| 1492 | |
| 1493 | static int macb_close(struct net_device *dev) |
| 1494 | { |
| 1495 | struct macb *bp = netdev_priv(dev); |
| 1496 | unsigned long flags; |
| 1497 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1498 | netif_stop_queue(dev); |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1499 | napi_disable(&bp->napi); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1500 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1501 | if (bp->phy_dev) |
| 1502 | phy_stop(bp->phy_dev); |
| 1503 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1504 | spin_lock_irqsave(&bp->lock, flags); |
| 1505 | macb_reset_hw(bp); |
| 1506 | netif_carrier_off(dev); |
| 1507 | spin_unlock_irqrestore(&bp->lock, flags); |
| 1508 | |
| 1509 | macb_free_consistent(bp); |
| 1510 | |
| 1511 | return 0; |
| 1512 | } |
| 1513 | |
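/*
 * Note: gem_update_stats() below relies on the counters in struct
 * gem_stats being declared in the same order as the GEM statistics
 * registers that start at GEM_OTX, so each register is simply
 * accumulated into the matching field.
 */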
Jamie Iles | a494ed8 | 2011-03-09 16:26:35 +0000 | [diff] [blame] | 1514 | static void gem_update_stats(struct macb *bp) |
| 1515 | { |
| 1516 | u32 __iomem *reg = bp->regs + GEM_OTX; |
| 1517 | u32 *p = &bp->hw_stats.gem.tx_octets_31_0; |
| 1518 | u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1; |
| 1519 | |
| 1520 | for (; p < end; p++, reg++) |
| 1521 | *p += __raw_readl(reg); |
| 1522 | } |
| 1523 | |
| 1524 | static struct net_device_stats *gem_get_stats(struct macb *bp) |
| 1525 | { |
| 1526 | struct gem_stats *hwstat = &bp->hw_stats.gem; |
| 1527 | struct net_device_stats *nstat = &bp->stats; |
| 1528 | |
| 1529 | gem_update_stats(bp); |
| 1530 | |
| 1531 | nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + |
| 1532 | hwstat->rx_alignment_errors + |
| 1533 | hwstat->rx_resource_errors + |
| 1534 | hwstat->rx_overruns + |
| 1535 | hwstat->rx_oversize_frames + |
| 1536 | hwstat->rx_jabbers + |
| 1537 | hwstat->rx_undersized_frames + |
| 1538 | hwstat->rx_length_field_frame_errors); |
| 1539 | nstat->tx_errors = (hwstat->tx_late_collisions + |
| 1540 | hwstat->tx_excessive_collisions + |
| 1541 | hwstat->tx_underrun + |
| 1542 | hwstat->tx_carrier_sense_errors); |
| 1543 | nstat->multicast = hwstat->rx_multicast_frames; |
| 1544 | nstat->collisions = (hwstat->tx_single_collision_frames + |
| 1545 | hwstat->tx_multiple_collision_frames + |
| 1546 | hwstat->tx_excessive_collisions); |
| 1547 | nstat->rx_length_errors = (hwstat->rx_oversize_frames + |
| 1548 | hwstat->rx_jabbers + |
| 1549 | hwstat->rx_undersized_frames + |
| 1550 | hwstat->rx_length_field_frame_errors); |
| 1551 | nstat->rx_over_errors = hwstat->rx_resource_errors; |
| 1552 | nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; |
| 1553 | nstat->rx_frame_errors = hwstat->rx_alignment_errors; |
| 1554 | nstat->rx_fifo_errors = hwstat->rx_overruns; |
| 1555 | nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; |
| 1556 | nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; |
| 1557 | nstat->tx_fifo_errors = hwstat->tx_underrun; |
| 1558 | |
| 1559 | return nstat; |
| 1560 | } |
| 1561 | |
Joachim Eastwood | 2ea32ee | 2012-11-07 08:14:54 +0000 | [diff] [blame] | 1562 | struct net_device_stats *macb_get_stats(struct net_device *dev) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1563 | { |
| 1564 | struct macb *bp = netdev_priv(dev); |
| 1565 | struct net_device_stats *nstat = &bp->stats; |
Jamie Iles | a494ed8 | 2011-03-09 16:26:35 +0000 | [diff] [blame] | 1566 | struct macb_stats *hwstat = &bp->hw_stats.macb; |
| 1567 | |
| 1568 | if (macb_is_gem(bp)) |
| 1569 | return gem_get_stats(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1570 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1571 | /* read stats from hardware */ |
| 1572 | macb_update_stats(bp); |
| 1573 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1574 | /* Convert HW stats into netdevice stats */ |
| 1575 | nstat->rx_errors = (hwstat->rx_fcs_errors + |
| 1576 | hwstat->rx_align_errors + |
| 1577 | hwstat->rx_resource_errors + |
| 1578 | hwstat->rx_overruns + |
| 1579 | hwstat->rx_oversize_pkts + |
| 1580 | hwstat->rx_jabbers + |
| 1581 | hwstat->rx_undersize_pkts + |
| 1582 | hwstat->sqe_test_errors + |
| 1583 | hwstat->rx_length_mismatch); |
| 1584 | nstat->tx_errors = (hwstat->tx_late_cols + |
| 1585 | hwstat->tx_excessive_cols + |
| 1586 | hwstat->tx_underruns + |
| 1587 | hwstat->tx_carrier_errors); |
| 1588 | nstat->collisions = (hwstat->tx_single_cols + |
| 1589 | hwstat->tx_multiple_cols + |
| 1590 | hwstat->tx_excessive_cols); |
| 1591 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + |
| 1592 | hwstat->rx_jabbers + |
| 1593 | hwstat->rx_undersize_pkts + |
| 1594 | hwstat->rx_length_mismatch); |
Alexander Stein | b19f7f7 | 2011-04-13 05:03:24 +0000 | [diff] [blame] | 1595 | nstat->rx_over_errors = hwstat->rx_resource_errors + |
| 1596 | hwstat->rx_overruns; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1597 | nstat->rx_crc_errors = hwstat->rx_fcs_errors; |
| 1598 | nstat->rx_frame_errors = hwstat->rx_align_errors; |
| 1599 | nstat->rx_fifo_errors = hwstat->rx_overruns; |
| 1600 | /* XXX: What does "missed" mean? */ |
| 1601 | nstat->tx_aborted_errors = hwstat->tx_excessive_cols; |
| 1602 | nstat->tx_carrier_errors = hwstat->tx_carrier_errors; |
| 1603 | nstat->tx_fifo_errors = hwstat->tx_underruns; |
| 1604 | /* Don't know about heartbeat or window errors... */ |
| 1605 | |
| 1606 | return nstat; |
| 1607 | } |
Joachim Eastwood | 2ea32ee | 2012-11-07 08:14:54 +0000 | [diff] [blame] | 1608 | EXPORT_SYMBOL_GPL(macb_get_stats); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1609 | |
| 1610 | static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
| 1611 | { |
| 1612 | struct macb *bp = netdev_priv(dev); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1613 | struct phy_device *phydev = bp->phy_dev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1614 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1615 | if (!phydev) |
| 1616 | return -ENODEV; |
| 1617 | |
| 1618 | return phy_ethtool_gset(phydev, cmd); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1619 | } |
| 1620 | |
| 1621 | static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
| 1622 | { |
| 1623 | struct macb *bp = netdev_priv(dev); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1624 | struct phy_device *phydev = bp->phy_dev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1625 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1626 | if (!phydev) |
| 1627 | return -ENODEV; |
| 1628 | |
| 1629 | return phy_ethtool_sset(phydev, cmd); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1630 | } |
| 1631 | |
Nicolas Ferre | d1d1b53 | 2012-10-31 06:04:56 +0000 | [diff] [blame] | 1632 | static int macb_get_regs_len(struct net_device *netdev) |
| 1633 | { |
| 1634 | return MACB_GREGS_NBR * sizeof(u32); |
| 1635 | } |
| 1636 | |
| 1637 | static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, |
| 1638 | void *p) |
| 1639 | { |
| 1640 | struct macb *bp = netdev_priv(dev); |
| 1641 | unsigned int tail, head; |
| 1642 | u32 *regs_buff = p; |
| 1643 | |
| 1644 | regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) |
| 1645 | | MACB_GREGS_VERSION; |
| 1646 | |
| 1647 | tail = macb_tx_ring_wrap(bp->tx_tail); |
| 1648 | head = macb_tx_ring_wrap(bp->tx_head); |
| 1649 | |
| 1650 | regs_buff[0] = macb_readl(bp, NCR); |
| 1651 | regs_buff[1] = macb_or_gem_readl(bp, NCFGR); |
| 1652 | regs_buff[2] = macb_readl(bp, NSR); |
| 1653 | regs_buff[3] = macb_readl(bp, TSR); |
| 1654 | regs_buff[4] = macb_readl(bp, RBQP); |
| 1655 | regs_buff[5] = macb_readl(bp, TBQP); |
| 1656 | regs_buff[6] = macb_readl(bp, RSR); |
| 1657 | regs_buff[7] = macb_readl(bp, IMR); |
| 1658 | |
| 1659 | regs_buff[8] = tail; |
| 1660 | regs_buff[9] = head; |
| 1661 | regs_buff[10] = macb_tx_dma(bp, tail); |
| 1662 | regs_buff[11] = macb_tx_dma(bp, head); |
| 1663 | |
| 1664 | if (macb_is_gem(bp)) { |
| 1665 | regs_buff[12] = gem_readl(bp, USRIO); |
| 1666 | regs_buff[13] = gem_readl(bp, DMACFG); |
| 1667 | } |
| 1668 | } |
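/*
 * The register snapshot filled in above is exported through the standard
 * ethtool get_regs interface; from user space it can be dumped with e.g.
 * "ethtool -d eth0" (the interface name is only an example).
 */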
| 1669 | |
Joachim Eastwood | 0005f54 | 2012-10-18 11:01:12 +0000 | [diff] [blame] | 1670 | const struct ethtool_ops macb_ethtool_ops = { |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1671 | .get_settings = macb_get_settings, |
| 1672 | .set_settings = macb_set_settings, |
Nicolas Ferre | d1d1b53 | 2012-10-31 06:04:56 +0000 | [diff] [blame] | 1673 | .get_regs_len = macb_get_regs_len, |
| 1674 | .get_regs = macb_get_regs, |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1675 | .get_link = ethtool_op_get_link, |
Richard Cochran | 17f393e | 2012-04-03 22:59:31 +0000 | [diff] [blame] | 1676 | .get_ts_info = ethtool_op_get_ts_info, |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1677 | }; |
Joachim Eastwood | 0005f54 | 2012-10-18 11:01:12 +0000 | [diff] [blame] | 1678 | EXPORT_SYMBOL_GPL(macb_ethtool_ops); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1679 | |
Joachim Eastwood | 0005f54 | 2012-10-18 11:01:12 +0000 | [diff] [blame] | 1680 | int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1681 | { |
| 1682 | struct macb *bp = netdev_priv(dev); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1683 | struct phy_device *phydev = bp->phy_dev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1684 | |
| 1685 | if (!netif_running(dev)) |
| 1686 | return -EINVAL; |
| 1687 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1688 | if (!phydev) |
| 1689 | return -ENODEV; |
| 1690 | |
Richard Cochran | 28b0411 | 2010-07-17 08:48:55 +0000 | [diff] [blame] | 1691 | return phy_mii_ioctl(phydev, rq, cmd); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1692 | } |
Joachim Eastwood | 0005f54 | 2012-10-18 11:01:12 +0000 | [diff] [blame] | 1693 | EXPORT_SYMBOL_GPL(macb_ioctl); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1694 | |
Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1695 | static const struct net_device_ops macb_netdev_ops = { |
| 1696 | .ndo_open = macb_open, |
| 1697 | .ndo_stop = macb_close, |
| 1698 | .ndo_start_xmit = macb_start_xmit, |
Jiri Pirko | afc4b13 | 2011-08-16 06:29:01 +0000 | [diff] [blame] | 1699 | .ndo_set_rx_mode = macb_set_rx_mode, |
Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1700 | .ndo_get_stats = macb_get_stats, |
| 1701 | .ndo_do_ioctl = macb_ioctl, |
| 1702 | .ndo_validate_addr = eth_validate_addr, |
| 1703 | .ndo_change_mtu = eth_change_mtu, |
| 1704 | .ndo_set_mac_address = eth_mac_addr, |
Thomas Petazzoni | 6e8cf5c | 2009-05-04 11:08:41 -0700 | [diff] [blame] | 1705 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1706 | .ndo_poll_controller = macb_poll_controller, |
| 1707 | #endif |
Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1708 | }; |
| 1709 | |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1710 | #if defined(CONFIG_OF) |
| 1711 | static const struct of_device_id macb_dt_ids[] = { |
| 1712 | { .compatible = "cdns,at32ap7000-macb" }, |
| 1713 | { .compatible = "cdns,at91sam9260-macb" }, |
| 1714 | { .compatible = "cdns,macb" }, |
| 1715 | { .compatible = "cdns,pc302-gem" }, |
| 1716 | { .compatible = "cdns,gem" }, |
| 1717 | { /* sentinel */ } |
| 1718 | }; |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1719 | MODULE_DEVICE_TABLE(of, macb_dt_ids); |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1720 | #endif |
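/*
 * Illustrative device tree node for this driver (the addresses, interrupt
 * number and MAC address below are placeholders, not taken from a real
 * board). macb_probe() picks up "phy-mode" through of_get_phy_mode() and
 * the MAC address through of_get_mac_address():
 *
 *	macb0: ethernet@fffc4000 {
 *		compatible = "cdns,at32ap7000-macb";
 *		reg = <0xfffc4000 0x100>;
 *		interrupts = <21>;
 *		phy-mode = "rmii";
 *		local-mac-address = [3a 0e 03 04 05 06];
 *	};
 */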
| 1721 | |
Haavard Skinnemoen | 06c3fd6 | 2008-01-31 13:10:22 +0100 | [diff] [blame] | 1722 | static int __init macb_probe(struct platform_device *pdev) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1723 | { |
Jamie Iles | 84e0cdb | 2011-03-08 20:17:06 +0000 | [diff] [blame] | 1724 | struct macb_platform_data *pdata; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1725 | struct resource *regs; |
| 1726 | struct net_device *dev; |
| 1727 | struct macb *bp; |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1728 | struct phy_device *phydev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1729 | u32 config; |
| 1730 | int err = -ENXIO; |
Jean-Christophe PLAGNIOL-VILLARD | 8ef29f8a | 2012-10-31 06:04:59 +0000 | [diff] [blame] | 1731 | struct pinctrl *pinctrl; |
Guenter Roeck | 5090704 | 2013-04-02 09:35:09 +0000 | [diff] [blame] | 1732 | const char *mac; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1733 | |
| 1734 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1735 | if (!regs) { |
| 1736 | dev_err(&pdev->dev, "no mmio resource defined\n"); |
| 1737 | goto err_out; |
| 1738 | } |
| 1739 | |
Jean-Christophe PLAGNIOL-VILLARD | 8ef29f8a | 2012-10-31 06:04:59 +0000 | [diff] [blame] | 1740 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); |
| 1741 | if (IS_ERR(pinctrl)) { |
| 1742 | err = PTR_ERR(pinctrl); |
| 1743 | if (err == -EPROBE_DEFER) |
| 1744 | goto err_out; |
| 1745 | |
| 1746 | dev_warn(&pdev->dev, "No pinctrl provided\n"); |
| 1747 | } |
| 1748 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1749 | err = -ENOMEM; |
| 1750 | dev = alloc_etherdev(sizeof(*bp)); |
Joe Perches | 41de8d4 | 2012-01-29 13:47:52 +0000 | [diff] [blame] | 1751 | if (!dev) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1752 | goto err_out; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1753 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1754 | SET_NETDEV_DEV(dev, &pdev->dev); |
| 1755 | |
| 1756 | /* TODO: Actually, we have some interesting features... */ |
| 1757 | dev->features |= 0; |
| 1758 | |
| 1759 | bp = netdev_priv(dev); |
| 1760 | bp->pdev = pdev; |
| 1761 | bp->dev = dev; |
| 1762 | |
| 1763 | spin_lock_init(&bp->lock); |
Nicolas Ferre | e86cd53 | 2012-10-31 06:04:57 +0000 | [diff] [blame] | 1764 | INIT_WORK(&bp->tx_error_task, macb_tx_error_task); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1765 | |
Jamie Iles | 461845d | 2011-03-08 20:19:23 +0000 | [diff] [blame] | 1766 | bp->pclk = clk_get(&pdev->dev, "pclk"); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1767 | if (IS_ERR(bp->pclk)) { |
 | 1768 | dev_err(&pdev->dev, "failed to get pclk\n"); |
| 1769 | goto err_out_free_dev; |
| 1770 | } |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 1771 | clk_prepare_enable(bp->pclk); |
Jamie Iles | 461845d | 2011-03-08 20:19:23 +0000 | [diff] [blame] | 1772 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1773 | bp->hclk = clk_get(&pdev->dev, "hclk"); |
| 1774 | if (IS_ERR(bp->hclk)) { |
| 1775 | dev_err(&pdev->dev, "failed to get hclk\n"); |
| 1776 | goto err_out_put_pclk; |
| 1777 | } |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 1778 | clk_prepare_enable(bp->hclk); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1779 | |
Joe Perches | 28f65c11 | 2011-06-09 09:13:32 -0700 | [diff] [blame] | 1780 | bp->regs = ioremap(regs->start, resource_size(regs)); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1781 | if (!bp->regs) { |
| 1782 | dev_err(&pdev->dev, "failed to map registers, aborting.\n"); |
| 1783 | err = -ENOMEM; |
| 1784 | goto err_out_disable_clocks; |
| 1785 | } |
| 1786 | |
| 1787 | dev->irq = platform_get_irq(pdev, 0); |
Javier Martinez Canillas | ab392d2 | 2011-03-28 16:27:31 +0000 | [diff] [blame] | 1788 | err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1789 | if (err) { |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1790 | dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", |
| 1791 | dev->irq, err); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1792 | goto err_out_iounmap; |
| 1793 | } |
| 1794 | |
Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1795 | dev->netdev_ops = &macb_netdev_ops; |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1796 | netif_napi_add(dev, &bp->napi, macb_poll, 64); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1797 | dev->ethtool_ops = &macb_ethtool_ops; |
| 1798 | |
| 1799 | dev->base_addr = regs->start; |
| 1800 | |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1801 | /* set up the appropriate routines according to the adapter type */ |
| 1802 | if (macb_is_gem(bp)) { |
| 1803 | bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; |
| 1804 | bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; |
| 1805 | bp->macbgem_ops.mog_init_rings = gem_init_rings; |
| 1806 | bp->macbgem_ops.mog_rx = gem_rx; |
| 1807 | } else { |
| 1808 | bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; |
| 1809 | bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; |
| 1810 | bp->macbgem_ops.mog_init_rings = macb_init_rings; |
| 1811 | bp->macbgem_ops.mog_rx = macb_rx; |
| 1812 | } |
| 1813 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1814 | /* Set MII management clock divider */ |
Jamie Iles | 70c9f3d | 2011-03-09 16:22:54 +0000 | [diff] [blame] | 1815 | config = macb_mdc_clk_div(bp); |
Jamie Iles | 757a03c | 2011-03-09 16:29:59 +0000 | [diff] [blame] | 1816 | config |= macb_dbw(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1817 | macb_writel(bp, NCFGR, config); |
| 1818 | |
Guenter Roeck | 5090704 | 2013-04-02 09:35:09 +0000 | [diff] [blame] | 1819 | mac = of_get_mac_address(pdev->dev.of_node); |
| 1820 | if (mac) |
| 1821 | memcpy(bp->dev->dev_addr, mac, ETH_ALEN); |
| 1822 | else |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1823 | macb_get_hwaddr(bp); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1824 | |
Guenter Roeck | 5090704 | 2013-04-02 09:35:09 +0000 | [diff] [blame] | 1825 | err = of_get_phy_mode(pdev->dev.of_node); |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1826 | if (err < 0) { |
| 1827 | pdata = pdev->dev.platform_data; |
| 1828 | if (pdata && pdata->is_rmii) |
| 1829 | bp->phy_interface = PHY_INTERFACE_MODE_RMII; |
| 1830 | else |
| 1831 | bp->phy_interface = PHY_INTERFACE_MODE_MII; |
| 1832 | } else { |
| 1833 | bp->phy_interface = err; |
| 1834 | } |
| 1835 | |
Patrice Vilchez | 140b755 | 2012-10-31 06:04:50 +0000 | [diff] [blame] | 1836 | if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) |
| 1837 | macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII)); |
| 1838 | else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1839 | #if defined(CONFIG_ARCH_AT91) |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1840 | macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) | |
| 1841 | MACB_BIT(CLKEN))); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1842 | #else |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1843 | macb_or_gem_writel(bp, USRIO, 0); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1844 | #endif |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1845 | else |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1846 | #if defined(CONFIG_ARCH_AT91) |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1847 | macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN)); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1848 | #else |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1849 | macb_or_gem_writel(bp, USRIO, MACB_BIT(MII)); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1850 | #endif |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1851 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1852 | err = register_netdev(dev); |
| 1853 | if (err) { |
| 1854 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); |
| 1855 | goto err_out_free_irq; |
| 1856 | } |
| 1857 | |
Nicolas Ferre | 72ca820 | 2013-04-14 22:04:33 +0000 | [diff] [blame] | 1858 | err = macb_mii_init(bp); |
| 1859 | if (err) |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1860 | goto err_out_unregister_netdev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1861 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1862 | platform_set_drvdata(pdev, dev); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1863 | |
Nicolas Ferre | 03fc472 | 2012-07-03 23:14:13 +0000 | [diff] [blame] | 1864 | netif_carrier_off(dev); |
| 1865 | |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1866 | netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n", |
| 1867 | macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr, |
| 1868 | dev->irq, dev->dev_addr); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1869 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1870 | phydev = bp->phy_dev; |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1871 | netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", |
| 1872 | phydev->drv->name, dev_name(&phydev->dev), phydev->irq); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1873 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1874 | return 0; |
| 1875 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1876 | err_out_unregister_netdev: |
| 1877 | unregister_netdev(dev); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1878 | err_out_free_irq: |
| 1879 | free_irq(dev->irq, dev); |
| 1880 | err_out_iounmap: |
| 1881 | iounmap(bp->regs); |
| 1882 | err_out_disable_clocks: |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 1883 | clk_disable_unprepare(bp->hclk); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1884 | clk_put(bp->hclk); |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 1885 | clk_disable_unprepare(bp->pclk); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1886 | err_out_put_pclk: |
| 1887 | clk_put(bp->pclk); |
| 1888 | err_out_free_dev: |
| 1889 | free_netdev(dev); |
| 1890 | err_out: |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1891 | return err; |
| 1892 | } |
| 1893 | |
Haavard Skinnemoen | 06c3fd6 | 2008-01-31 13:10:22 +0100 | [diff] [blame] | 1894 | static int __exit macb_remove(struct platform_device *pdev) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1895 | { |
| 1896 | struct net_device *dev; |
| 1897 | struct macb *bp; |
| 1898 | |
| 1899 | dev = platform_get_drvdata(pdev); |
| 1900 | |
| 1901 | if (dev) { |
| 1902 | bp = netdev_priv(dev); |
Atsushi Nemoto | 84b7901 | 2008-04-10 23:30:07 +0900 | [diff] [blame] | 1903 | if (bp->phy_dev) |
| 1904 | phy_disconnect(bp->phy_dev); |
Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 1905 | mdiobus_unregister(bp->mii_bus); |
| 1906 | kfree(bp->mii_bus->irq); |
| 1907 | mdiobus_free(bp->mii_bus); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1908 | unregister_netdev(dev); |
| 1909 | free_irq(dev->irq, dev); |
| 1910 | iounmap(bp->regs); |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 1911 | clk_disable_unprepare(bp->hclk); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1912 | clk_put(bp->hclk); |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 1913 | clk_disable_unprepare(bp->pclk); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1914 | clk_put(bp->pclk); |
| 1915 | free_netdev(dev); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1916 | } |
| 1917 | |
| 1918 | return 0; |
| 1919 | } |
| 1920 | |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 1921 | #ifdef CONFIG_PM |
| 1922 | static int macb_suspend(struct platform_device *pdev, pm_message_t state) |
| 1923 | { |
| 1924 | struct net_device *netdev = platform_get_drvdata(pdev); |
| 1925 | struct macb *bp = netdev_priv(netdev); |
| 1926 | |
Nicolas Ferre | 03fc472 | 2012-07-03 23:14:13 +0000 | [diff] [blame] | 1927 | netif_carrier_off(netdev); |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 1928 | netif_device_detach(netdev); |
| 1929 | |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 1930 | clk_disable_unprepare(bp->hclk); |
| 1931 | clk_disable_unprepare(bp->pclk); |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 1932 | |
| 1933 | return 0; |
| 1934 | } |
| 1935 | |
| 1936 | static int macb_resume(struct platform_device *pdev) |
| 1937 | { |
| 1938 | struct net_device *netdev = platform_get_drvdata(pdev); |
| 1939 | struct macb *bp = netdev_priv(netdev); |
| 1940 | |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 1941 | clk_prepare_enable(bp->pclk); |
| 1942 | clk_prepare_enable(bp->hclk); |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 1943 | |
| 1944 | netif_device_attach(netdev); |
| 1945 | |
| 1946 | return 0; |
| 1947 | } |
| 1948 | #else |
| 1949 | #define macb_suspend NULL |
| 1950 | #define macb_resume NULL |
| 1951 | #endif |
| 1952 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1953 | static struct platform_driver macb_driver = { |
Haavard Skinnemoen | 06c3fd6 | 2008-01-31 13:10:22 +0100 | [diff] [blame] | 1954 | .remove = __exit_p(macb_remove), |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 1955 | .suspend = macb_suspend, |
| 1956 | .resume = macb_resume, |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1957 | .driver = { |
| 1958 | .name = "macb", |
Kay Sievers | 72abb46 | 2008-04-18 13:50:44 -0700 | [diff] [blame] | 1959 | .owner = THIS_MODULE, |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1960 | .of_match_table = of_match_ptr(macb_dt_ids), |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1961 | }, |
| 1962 | }; |
| 1963 | |
Jingoo Han | b543a8d | 2013-03-04 16:43:18 +0000 | [diff] [blame] | 1964 | module_platform_driver_probe(macb_driver, macb_probe); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1965 | |
| 1966 | MODULE_LICENSE("GPL"); |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1967 | MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); |
Jean Delvare | e05503e | 2011-05-18 16:49:24 +0200 | [diff] [blame] | 1968 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
Kay Sievers | 72abb46 | 2008-04-18 13:50:44 -0700 | [diff] [blame] | 1969 | MODULE_ALIAS("platform:macb"); |