// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017, National Instruments Corp.
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

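/* Number of buffer descriptors in each DMA ring; the rings live in
 * coherent memory and are indexed modulo these sizes.
 */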
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Axi DMA Register definitions */
#define XAXIDMA_TX_CR_OFFSET	0x00 /* Channel control */
#define XAXIDMA_TX_SR_OFFSET	0x04 /* Status */
#define XAXIDMA_TX_CDESC_OFFSET	0x08 /* Current descriptor pointer */
#define XAXIDMA_TX_TDESC_OFFSET	0x10 /* Tail descriptor pointer */

#define XAXIDMA_RX_CR_OFFSET	0x30 /* Channel control */
#define XAXIDMA_RX_SR_OFFSET	0x34 /* Status */
#define XAXIDMA_RX_CDESC_OFFSET	0x38 /* Current descriptor pointer */
#define XAXIDMA_RX_TDESC_OFFSET	0x40 /* Tail descriptor pointer */

#define XAXIDMA_CR_RUNSTOP_MASK	0x1 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x4 /* Reset DMA engine */

#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF /* Requested len */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000 /* All control bits */

#define XAXIDMA_DELAY_MASK		0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK		0x00FF0000 /* Coalesce counter */

#define XAXIDMA_DELAY_SHIFT		24
#define XAXIDMA_COALESCE_SHIFT		16

#define XAXIDMA_IRQ_IOC_MASK		0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK		0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */

/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD	24
#define XAXIDMA_DFT_TX_WAITBOUND	254
#define XAXIDMA_DFT_RX_THRESHOLD	24
#define XAXIDMA_DFT_RX_WAITBOUND	254

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */
#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000 /* Completed */
#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000 /* Decode error */
#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000 /* Slave error */
#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000 /* Internal err */
#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000 /* All errors */
#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000 /* First rx pkt */
#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000 /* Last rx pkt */
#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000 /* All status bits */

#define NIXGE_REG_CTRL_OFFSET	0x4000
#define NIXGE_REG_INFO		0x00
#define NIXGE_REG_MAC_CTL	0x04
#define NIXGE_REG_PHY_CTL	0x08
#define NIXGE_REG_LED_CTL	0x0c
#define NIXGE_REG_MDIO_DATA	0x10
#define NIXGE_REG_MDIO_ADDR	0x14
#define NIXGE_REG_MDIO_OP	0x18
#define NIXGE_REG_MDIO_CTRL	0x1c

#define NIXGE_ID_LED_CTL_EN	BIT(0)
#define NIXGE_ID_LED_CTL_VAL	BIT(1)

#define NIXGE_MDIO_CLAUSE45	BIT(12)
#define NIXGE_MDIO_CLAUSE22	0
#define NIXGE_MDIO_OP(n)	(((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS	0
#define NIXGE_MDIO_C45_WRITE	BIT(0)
#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE	BIT(0)
#define NIXGE_MDIO_C22_READ	BIT(1)
#define NIXGE_MDIO_ADDR(n)	(((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n)	(((n) & 0x1f) << 0)

#define NIXGE_REG_MAC_LSB	0x1000
#define NIXGE_REG_MAC_MSB	0x1004

/* Packet size info */
#define NIXGE_HDR_SIZE		14   /* Size of Ethernet header */
#define NIXGE_TRL_SIZE		4    /* Size of Ethernet trailer (FCS) */
#define NIXGE_MTU		1500 /* Max MTU of an Ethernet frame */
#define NIXGE_JUMBO_MTU		9000 /* Max MTU of a jumbo Eth. frame */

#define NIXGE_MAX_FRAME_SIZE	(NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)

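/* Hardware buffer descriptor as laid out in memory for the AXI DMA engine.
 * The app* words are application scratch space passed through by the
 * hardware; this driver uses sw_id_offset to stash the owning skb pointer,
 * which assumes 32-bit pointers on the target platform.
 */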
struct nixge_hw_dma_bd {
	u32 next;
	u32 reserved1;
	u32 phys;
	u32 reserved2;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};

struct nixge_tx_skb {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t size;
	bool mapped_as_page;
};

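/* Per-adapter driver state, reachable via netdev_priv() */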
struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* Connection to PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	int link;
	unsigned int speed;
	unsigned int duplex;

	/* MDIO bus data */
	struct mii_bus *mii_bus;	/* MII bus reference */

	/* IO registers, dma functions and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;
	u32 last_link;

	/* Buffer descriptors */
	struct nixge_hw_dma_bd *tx_bd_v;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tx_bd_p;

	struct nixge_hw_dma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};

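/* Register accessors: the DMA engine and the NIXGE control block share one
 * MMIO mapping; ctrl_regs simply points NIXGE_REG_CTRL_OFFSET into dma_regs
 * (see nixge_probe()).
 */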
static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}

#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	int i;

	/* This is also called from the init error path, where the Rx ring
	 * may not have been allocated yet, so guard before touching it.
	 */
	if (priv->rx_bd_v) {
		for (i = 0; i < RX_BD_NUM; i++) {
			dma_unmap_single(ndev->dev.parent,
					 priv->rx_bd_v[i].phys,
					 NIXGE_MAX_JUMBO_FRAME_SIZE,
					 DMA_FROM_DEVICE);
			dev_kfree_skb((struct sk_buff *)
				      (priv->rx_bd_v[i].sw_id_offset));
		}

		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v,
				  priv->rx_bd_p);
	}

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v,
				  priv->tx_bd_p);
}

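/* Set up the Tx/Rx descriptor rings: allocate both rings from coherent
 * memory, chain each BD's next pointer into a circle, pre-map one receive
 * skb per Rx BD, program interrupt coalescing, and finally start both DMA
 * channels by setting the run/stop bit and writing the tail pointers.
 */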
static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 cr;
	int i;

	/* Reset the indexes which are used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					    &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kzalloc(ndev->dev.parent,
				    sizeof(*priv->tx_skb) * TX_BD_NUM,
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					    &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		priv->tx_bd_v[i].next = priv->tx_bd_p +
					sizeof(*priv->tx_bd_v) *
					((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		priv->rx_bd_v[i].next = priv->rx_bd_p +
					sizeof(*priv->rx_bd_v) *
					((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		priv->rx_bd_v[i].sw_id_offset = (u32)skb;
		priv->rx_bd_v[i].phys =
			dma_map_single(ndev->dev.parent,
				       skb->data,
				       NIXGE_MAX_JUMBO_FRAME_SIZE,
				       DMA_FROM_DEVICE);
		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (priv->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (priv->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
			    (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but only after we write to the
	 * tail pointer register will it actually start transmitting.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}

static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	/* Reset Axi DMA. This resets the NIXGE Ethernet core as well.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process.
	 */
	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}

static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}

static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

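/* Reclaim Tx descriptors the hardware has marked complete: walk the ring
 * from tx_bd_ci, unmap each finished buffer, and account the statistics.
 * The walk stops at the first BD whose COMPLETE bit is still clear.
 */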
static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}

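/* A descriptor whose status bits are still set has not been reclaimed yet,
 * so peeking num_frag slots ahead of the tail tells us whether the whole
 * frame (head buffer plus all fragments) will fit in the ring.
 */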
static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

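/* Transmit path: the head buffer gets TXSOF, every page fragment is mapped
 * into its own descriptor, and the last descriptor gets TXEOF plus the skb
 * pointer for later reclaim. Writing the tail pointer register is what
 * actually kicks the DMA engine.
 */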
static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_OK;
	}

	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
		goto drop;

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_p->phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
			goto frag_err;

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_p->phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* last buffer of the frame */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
	/* Start the transfer */
	nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
frag_err:
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	/* Roll back to the head descriptor and unmap its buffer as well */
	if (priv->tx_bd_tail)
		priv->tx_bd_tail--;
	else
		priv->tx_bd_tail = TX_BD_NUM - 1;
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];
	nixge_tx_skb_unmap(priv, tx_skb);
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	cur_p->status = 0;
drop:
	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

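/* NAPI receive: harvest completed Rx descriptors up to the budget, hand the
 * packets to the stack via GRO, and refill each descriptor with a freshly
 * allocated and DMA-mapped skb before advancing the tail pointer.
 */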
static int nixge_recv(struct net_device *ndev, int budget)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct nixge_hw_dma_bd *cur_p;
	dma_addr_t tail_p = 0;
	u32 packets = 0;
	u32 length = 0;
	u32 size = 0;

	cur_p = &priv->rx_bd_v[priv->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK) &&
	       packets < budget) {
		tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
			 priv->rx_bd_ci;

		skb = (struct sk_buff *)(cur_p->sw_id_offset);

		length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
			length = NIXGE_MAX_JUMBO_FRAME_SIZE;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);

		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* For now mark them as CHECKSUM_NONE since
		 * we don't have offload capabilities
		 */
		skb->ip_summed = CHECKSUM_NONE;

		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     NIXGE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
			/* FIXME: bail out and clean up */
			netdev_err(ndev, "Failed to map ...\n");
		}
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32)new_skb;

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}

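/* NAPI poll callback: if the budget was not exhausted, complete NAPI and
 * either reschedule (more completions arrived meanwhile) or re-enable the
 * Rx interrupts that nixge_rx_irq() masked off.
 */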
static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* If there's more work, clear the status and
			 * reschedule
			 */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_reschedule(napi);
		} else {
			/* if not, turn on RX IRQs again ... */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}

static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->tx_bd_v[priv->tx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Turn off IRQs because NAPI takes over from here */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->rx_bd_v[priv->rx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

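/* DMA error recovery tasklet: reset both channels, scrub every Tx and Rx
 * descriptor back to a clean state, and restart the engine with the default
 * coalescing settings, mirroring nixge_hw_dma_bd_init().
 */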
static void nixge_dma_err_handler(unsigned long data)
{
	struct nixge_priv *lp = (struct nixge_priv *)data;
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	u32 cr, i;

	__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		tx_skb = &lp->tx_skb[i];
		nixge_tx_skb_unmap(lp, tx_skb);

		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			    (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but only after we write to the
	 * tail pointer register will it actually start transmitting.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
}

static int nixge_open(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phy;
	int ret;

	nixge_device_reset(ndev);

	phy = of_phy_connect(ndev, priv->phy_node,
			     &nixge_handle_link_change, 0, priv->phy_mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
		     (unsigned long)priv);

	napi_enable(&priv->napi);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	netif_start_queue(ndev);

	return 0;

err_rx_irq:
	free_irq(priv->tx_irq, ndev);
err_tx_irq:
	napi_disable(&priv->napi);
	phy_stop(phy);
	phy_disconnect(phy);
	tasklet_kill(&priv->dma_err_tasklet);
	netdev_err(ndev, "request_irq() failed\n");
	return ret;
}

static int nixge_stop(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 cr;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));

	tasklet_kill(&priv->dma_err_tasklet);

	free_irq(priv->tx_irq, ndev);
	free_irq(priv->rx_irq, ndev);

	nixge_hw_dma_bd_release(ndev);

	return 0;
}

static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
	    NIXGE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
			     (ndev->dev_addr[2] << 24) |
			     (ndev->dev_addr[3] << 16) |
			     (ndev->dev_addr[4] << 8) |
			     (ndev->dev_addr[5] << 0));

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
			     (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));

	return 0;
}

static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (!err)
		__nixge_hw_set_mac_address(ndev);

	return err;
}

static const struct net_device_ops nixge_netdev_ops = {
	.ndo_open = nixge_open,
	.ndo_stop = nixge_stop,
	.ndo_start_xmit = nixge_start_xmit,
	.ndo_change_mtu	= nixge_change_mtu,
	.ndo_set_mac_address = nixge_net_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
};

static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, "nixge", sizeof(ed->driver));
	strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}

static int nixge_ethtools_get_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 regval = 0;

	regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

static int nixge_ethtools_set_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	if (ecoalesce->rx_coalesce_usecs ||
	    ecoalesce->rx_coalesce_usecs_irq ||
	    ecoalesce->rx_max_coalesced_frames_irq ||
	    ecoalesce->tx_coalesce_usecs ||
	    ecoalesce->tx_coalesce_usecs_irq ||
	    ecoalesce->tx_max_coalesced_frames_irq ||
	    ecoalesce->stats_block_coalesce_usecs ||
	    ecoalesce->use_adaptive_rx_coalesce ||
	    ecoalesce->use_adaptive_tx_coalesce ||
	    ecoalesce->pkt_rate_low ||
	    ecoalesce->rx_coalesce_usecs_low ||
	    ecoalesce->rx_max_coalesced_frames_low ||
	    ecoalesce->tx_coalesce_usecs_low ||
	    ecoalesce->tx_max_coalesced_frames_low ||
	    ecoalesce->pkt_rate_high ||
	    ecoalesce->rx_coalesce_usecs_high ||
	    ecoalesce->rx_max_coalesced_frames_high ||
	    ecoalesce->tx_coalesce_usecs_high ||
	    ecoalesce->tx_max_coalesced_frames_high ||
	    ecoalesce->rate_sample_interval)
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int nixge_ethtools_set_phys_id(struct net_device *ndev,
				      enum ethtool_phys_id_state state)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Enable identification LED override */
		ctrl |= NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		return 2;

	case ETHTOOL_ID_ON:
		ctrl |= NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_OFF:
		ctrl &= ~NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		ctrl &= ~NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;
	}

	return 0;
}

static const struct ethtool_ops nixge_ethtool_ops = {
	.get_drvinfo	= nixge_ethtools_get_drvinfo,
	.get_coalesce	= nixge_ethtools_get_coalesce,
	.set_coalesce	= nixge_ethtools_set_coalesce,
	.set_phys_id	= nixge_ethtools_set_phys_id,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_link	= ethtool_op_get_link,
};

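/* MDIO access via the NIXGE control registers. Clause 45 transactions are
 * two-phase: an ADDRESS op first latches the register address, then the
 * actual READ/WRITE op is issued. Writing 1 to NIXGE_REG_MDIO_CTRL kicks
 * off each phase; the register reads back as zero once it completes.
 */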
static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;
	u16 device;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address\n");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	}

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command\n");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}

static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	u16 device;
	int err;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address\n");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		/* Kick off the write transaction, as the C22 path and the
		 * read path do before polling for completion.
		 */
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command\n");
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 |
			NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command\n");
	}

	return err;
}

static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "nixge_mii_bus";
	bus->read = nixge_mdio_read;
	bus->write = nixge_mdio_write;
	bus->parent = priv->dev;

	priv->mii_bus = bus;

	return of_mdiobus_register(bus, np);
}

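/* Read the MAC address from an nvmem cell named "address", if one is
 * provided. Returns a kmalloc'd buffer the caller must kfree(), or NULL
 * when the cell is absent or unreadable.
 */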
static void *nixge_get_nvmem_address(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t cell_size;
	char *mac;

	cell = nvmem_cell_get(dev, "address");
	if (IS_ERR(cell))
		return NULL;

	mac = nvmem_cell_read(cell, &cell_size);
	nvmem_cell_put(cell);

	return IS_ERR(mac) ? NULL : mac;
}

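/* Probe: map the MMIO region, determine the MAC address (nvmem cell or a
 * random one), wire up NAPI, the IRQs and the MDIO bus, then register the
 * net_device.
 */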
static int nixge_probe(struct platform_device *pdev)
{
	struct nixge_priv *priv;
	struct net_device *ndev;
	struct resource *dmares;
	const char *mac_addr;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &nixge_netdev_ops;
	ndev->ethtool_ops = &nixge_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = NIXGE_JUMBO_MTU;

	mac_addr = nixge_get_nvmem_address(&pdev->dev);
	if (mac_addr && is_valid_ether_addr(mac_addr))
		ether_addr_copy(ndev->dev_addr, mac_addr);
	else
		eth_hw_addr_random(ndev);
	kfree(mac_addr);

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->dev = &pdev->dev;

	netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);

	dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
	if (IS_ERR(priv->dma_regs)) {
		netdev_err(ndev, "failed to map dma regs\n");
		err = PTR_ERR(priv->dma_regs);
		goto free_netdev;
	}
	priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
	__nixge_hw_set_mac_address(ndev);

	priv->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (priv->tx_irq < 0) {
		netdev_err(ndev, "could not find 'tx' irq\n");
		err = priv->tx_irq;
		goto free_netdev;
	}

	priv->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (priv->rx_irq < 0) {
		netdev_err(ndev, "could not find 'rx' irq\n");
		err = priv->rx_irq;
		goto free_netdev;
	}

	priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	err = nixge_mdio_setup(priv, pdev->dev.of_node);
	if (err) {
		netdev_err(ndev, "error registering mdio bus\n");
		goto free_netdev;
	}

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (priv->phy_mode < 0) {
		netdev_err(ndev, "could not find \"phy-mode\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		netdev_err(ndev, "could not find \"phy-handle\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	err = register_netdev(priv->ndev);
	if (err) {
		netdev_err(ndev, "register_netdev() error (%i)\n", err);
		goto unregister_mdio;
	}

	return 0;

unregister_mdio:
	mdiobus_unregister(priv->mii_bus);

free_netdev:
	free_netdev(ndev);

	return err;
}

static int nixge_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nixge_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);

	free_netdev(ndev);

	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id nixge_dt_ids[] = {
	{ .compatible = "ni,xge-enet-2.00", },
	{},
};
MODULE_DEVICE_TABLE(of, nixge_dt_ids);

static struct platform_driver nixge_driver = {
	.probe		= nixge_probe,
	.remove		= nixge_remove,
	.driver		= {
		.name		= "nixge",
		.of_match_table	= of_match_ptr(nixge_dt_ids),
	},
};
module_platform_driver(nixge_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");