/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
 *	      Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"

static const struct acpi_device_id xge_acpi_match[];

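/*
 * Acquire the per-port platform resources: the ENET CSR region, the
 * MAC address (falling back to a random one), the RGMII
 * phy-connection-type and the interrupt line.
 */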
static int xge_get_resources(struct xge_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	int phy_mode, ret = 0;
	struct resource *res;
	struct device *dev;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}

	pdata->resources.base_addr = devm_ioremap(dev, res->start,
						  resource_size(res));
	if (!pdata->resources.base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	phy_mode = device_get_phy_mode(dev);
	if (phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return phy_mode;
	}
	pdata->resources.phy_mode = phy_mode;

	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "Unable to get irq\n");
		return ret;
	}
	pdata->resources.irq = ret;

	return 0;
}

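/*
 * Allocate and DMA-map nbuf receive buffers, parking one in each empty
 * Rx descriptor starting at the ring tail.  The E (empty) bit is written
 * last, after a dma_wmb(), so the hardware never sees a half-initialized
 * descriptor.
 */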
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	u8 tail = ring->tail;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 len;
	int i;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   SET_BITS(PKT_ADDRH,
						    upper_32_bits(dma_addr)));

		dma_wmb();
		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
					   SET_BITS(E, 1));
		tail = (tail + 1) & slots;
	}

	ring->tail = tail;

	return 0;
}

static int xge_init_hw(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_port_reset(ndev);
	if (ret)
		return ret;

	xge_port_init(ndev);
	pdata->nbufs = NUM_BUFS;

	return 0;
}

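/* Interrupt handler: mask further interrupts and defer all work to NAPI */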
static irqreturn_t xge_irq(const int irq, void *data)
{
	struct xge_pdata *pdata = data;

	if (napi_schedule_prep(&pdata->napi)) {
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
	}

	return IRQ_HANDLED;
}

static int xge_request_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

	ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
			  pdata);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);

	return ret;
}

static void xge_free_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	free_irq(pdata->resources.irq, pdata);
}

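/*
 * A Tx slot is reusable once its E bit is set and the slot has been
 * reaped (PKT_SIZE reads back SLOT_EMPTY).
 */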
static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
		return true;

	return false;
}

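/*
 * Transmit path: bounce the linear skb data into a freshly allocated
 * coherent buffer (packet buffers should be 64B aligned), fill in the
 * descriptor and kick the Tx DMA engine.
 */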
static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u64 addr_lo, addr_hi;
	void *pkt_buf;
	u8 tail;
	u16 len;

	tx_ring = pdata->tx_ring;
	tail = tx_ring->tail;
	len = skb_headlen(skb);
	raw_desc = &tx_ring->raw_desc[tail];

	if (!is_tx_slot_available(raw_desc)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* Packet buffers should be 64B aligned */
	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
				     GFP_ATOMIC);
	if (unlikely(!pkt_buf)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	memcpy(pkt_buf, skb->data, len);

	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
				   SET_BITS(PKT_ADDRH,
					    upper_32_bits(dma_addr)));

	tx_ring->pkt_info[tail].skb = skb;
	tx_ring->pkt_info[tail].dma_addr = dma_addr;
	tx_ring->pkt_info[tail].pkt_buf = pkt_buf;

	dma_wmb();

	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
				   SET_BITS(PKT_SIZE, len) |
				   SET_BITS(E, 0));
	skb_tx_timestamp(skb);
	xge_wr_csr(pdata, DMATXCTRL, 1);

	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);

	return NETDEV_TX_OK;
}

static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

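/*
 * Reap completed Tx descriptors: free each packet buffer and skb,
 * update the Tx counters, mark the slot empty again and wake the
 * queue if it had been stopped.
 */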
static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}

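/*
 * Receive up to budget frames: unmap each filled buffer, hand good
 * frames to GRO and count errored ones, refilling the ring one buffer
 * at a time as descriptors are consumed.
 */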
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}

static void xge_delete_desc_ring(struct net_device *ndev,
				 struct xge_desc_ring *ring)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	u16 size;

	if (!ring)
		return;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	if (ring->desc_addr)
		dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);

	kfree(ring->pkt_info);
	kfree(ring);
}

static void xge_free_buffers(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	struct device *dev = &pdata->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		skb = ring->pkt_info[i].skb;
		dma_addr = ring->pkt_info[i].dma_addr;

		if (!skb)
			continue;

		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

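/*
 * Drain whatever the hardware still owns (pending Tx completions and
 * up to 64 received frames), then release the buffers and both rings.
 */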
static void xge_delete_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	xge_delete_desc_ring(ndev, pdata->tx_ring);

	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
}

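/*
 * Allocate one descriptor ring in coherent DMA memory together with
 * its per-slot bookkeeping array, and initialize the descriptors.
 */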
static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *ring;
	u16 size;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->desc_addr)
		goto err;

	ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
				 GFP_KERNEL);
	if (!ring->pkt_info)
		goto err;

	xge_setup_desc(ring);

	return ring;

err:
	xge_delete_desc_ring(ndev, ring);

	return NULL;
}

static int xge_create_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring;
	int ret;

	/* create tx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->tx_ring = ring;
	xge_update_tx_desc_addr(pdata);

	/* create rx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->rx_ring = ring;
	xge_update_rx_desc_addr(pdata);

	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
	if (ret)
		goto err;

	return 0;
err:
	xge_delete_desc_rings(ndev);

	return -ENOMEM;
}

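/*
 * ndo_open: create the descriptor rings, enable NAPI and the IRQ, then
 * bring up the Rx DMA engine, PHY, MAC and the transmit queue.
 */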
static int xge_open(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_create_desc_rings(ndev);
	if (ret)
		return ret;

	napi_enable(&pdata->napi);
	ret = xge_request_irq(ndev);
	if (ret) {
		napi_disable(&pdata->napi);
		xge_delete_desc_rings(ndev);
		return ret;
	}

	xge_intr_enable(pdata);
	xge_wr_csr(pdata, DMARXCTRL, 1);

	phy_start(ndev->phydev);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

	return 0;
}

static int xge_close(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);
	xge_mac_disable(pdata);
	phy_stop(ndev->phydev);

	xge_intr_disable(pdata);
	xge_free_irq(ndev);
	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);

	return 0;
}

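/*
 * NAPI poll: reap Tx completions, then receive up to budget frames;
 * interrupts are re-enabled once the Rx budget is not exhausted.
 */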
static int xge_napi(struct napi_struct *napi, const int budget)
{
	struct net_device *ndev = napi->dev;
	struct xge_pdata *pdata;
	int processed;

	pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	processed = xge_rx_poll(ndev, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		xge_intr_enable(pdata);
	}

	return processed;
}

static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	xge_mac_set_station_addr(pdata);

	return 0;
}

static bool is_tx_pending(struct xge_raw_desc *raw_desc)
{
	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

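/*
 * Walk the Tx ring and release the packet buffer and skb of every
 * descriptor that was queued but never completed; used when the ring
 * is reset after a Tx timeout.
 */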
static void xge_free_pending_skb(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	int i;

	tx_ring = pdata->tx_ring;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		raw_desc = &tx_ring->raw_desc[i];

		if (!is_tx_pending(raw_desc))
			continue;

		skb = tx_ring->pkt_info[i].skb;
		dma_addr = tx_ring->pkt_info[i].dma_addr;
		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);
	}
}

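/*
 * Tx watchdog: stop the Tx DMA engine, reap and free everything still
 * queued, then reset the Tx ring and MAC and restart the interface.
 */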
static void xge_timeout(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	rtnl_lock();

	if (!netif_running(ndev))
		goto out;

	netif_stop_queue(ndev);
	xge_intr_disable(pdata);
	napi_disable(&pdata->napi);

	xge_wr_csr(pdata, DMATXCTRL, 0);
	xge_txc_poll(ndev);
	xge_free_pending_skb(ndev);
	xge_wr_csr(pdata, DMATXSTATUS, ~0U);

	xge_setup_desc(pdata->tx_ring);
	xge_update_tx_desc_addr(pdata);
	xge_mac_init(pdata);

	napi_enable(&pdata->napi);
	xge_intr_enable(pdata);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

out:
	rtnl_unlock();
}

static void xge_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *storage)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_stats *stats = &pdata->stats;

	storage->tx_packets += stats->tx_packets;
	storage->tx_bytes += stats->tx_bytes;

	storage->rx_packets += stats->rx_packets;
	storage->rx_bytes += stats->rx_bytes;
	storage->rx_errors += stats->rx_errors;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
};

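/*
 * Probe: allocate and populate the net_device, acquire platform
 * resources, initialize the hardware and MDIO bus, then register the
 * device with the network stack.
 */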
static int xge_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct xge_pdata *pdata;
	int ret;

	ndev = alloc_etherdev(sizeof(*pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;

	ndev->features |= NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xge_get_resources(pdata);
	if (ret)
		goto err;

	ndev->hw_features = ndev->features;
	xge_set_ethtool_ops(ndev);

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xge_init_hw(ndev);
	if (ret)
		goto err;

	ret = xge_mdio_config(ndev);
	if (ret)
		goto err;

	netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err_mdio_remove;
	}

	return 0;

err_mdio_remove:
	xge_mdio_remove(ndev);
err:
	free_netdev(ndev);

	return ret;
}

static int xge_remove(struct platform_device *pdev)
{
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	xge_mdio_remove(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

static void xge_shutdown(struct platform_device *pdev)
{
	struct xge_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xge_remove(pdev);
}

static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);

static struct platform_driver xge_driver = {
	.driver = {
		   .name = "xgene-enet-v2",
		   .acpi_match_table = ACPI_PTR(xge_acpi_match),
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_VERSION(XGENE_ENET_V2_VERSION);
MODULE_LICENSE("GPL");