Ioana Ciornei0bb29b22018-07-31 12:02:47 -05001// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002/* Copyright 2014-2016 Freescale Semiconductor Inc.
3 * Copyright 2016-2017 NXP
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05004 */
5#include <linux/init.h>
6#include <linux/module.h>
7#include <linux/platform_device.h>
8#include <linux/etherdevice.h>
9#include <linux/of_net.h>
10#include <linux/interrupt.h>
11#include <linux/msi.h>
12#include <linux/kthread.h>
Ioana Radulescu08eb2392017-05-24 07:13:27 -050013#include <linux/iommu.h>
Ioana Radulescu859f9982018-04-26 18:23:47 +080014#include <linux/net_tstamp.h>
Bogdan Purcareata6bd067c2018-02-05 08:07:42 -060015#include <linux/fsl/mc.h>
Ioana Radulescu859f9982018-04-26 18:23:47 +080016
17#include <net/sock.h>
18
Ioana Radulescu6e2387e2017-04-28 04:50:29 -050019#include "dpaa2-eth.h"
20
Ioana Radulescu56361872017-04-28 04:50:32 -050021/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
22 * using trace events only need to #include <trace/events/sched.h>
23 */
24#define CREATE_TRACE_POINTS
25#include "dpaa2-eth-trace.h"
26
Ioana Radulescu6e2387e2017-04-28 04:50:29 -050027MODULE_LICENSE("Dual BSD/GPL");
28MODULE_AUTHOR("Freescale Semiconductor, Inc");
29MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
30
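/* Translate a DMA (IOVA) address into a kernel virtual address. If the
 * device sits behind an IOMMU, resolve the IOVA through the IOMMU domain
 * first; otherwise the IOVA is already the physical address.
 */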
Ioana Radulescu08eb2392017-05-24 07:13:27 -050031static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
32 dma_addr_t iova_addr)
33{
34 phys_addr_t phys_addr;
35
36 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
37
38 return phys_to_virt(phys_addr);
39}
40
Ioana Radulescu6e2387e2017-04-28 04:50:29 -050041static void validate_rx_csum(struct dpaa2_eth_priv *priv,
42 u32 fd_status,
43 struct sk_buff *skb)
44{
45 skb_checksum_none_assert(skb);
46
47 /* HW checksum validation is disabled, nothing to do here */
48 if (!(priv->net_dev->features & NETIF_F_RXCSUM))
49 return;
50
51 /* Read checksum validation bits */
52 if (!((fd_status & DPAA2_FAS_L3CV) &&
53 (fd_status & DPAA2_FAS_L4CV)))
54 return;
55
56 /* Inform the stack there's no need to compute L3/L4 csum anymore */
57 skb->ip_summed = CHECKSUM_UNNECESSARY;
58}
59
60/* Free a received FD.
61 * Not to be used for Tx conf FDs or on any other paths.
62 */
63static void free_rx_fd(struct dpaa2_eth_priv *priv,
64 const struct dpaa2_fd *fd,
65 void *vaddr)
66{
67 struct device *dev = priv->net_dev->dev.parent;
68 dma_addr_t addr = dpaa2_fd_get_addr(fd);
69 u8 fd_format = dpaa2_fd_get_format(fd);
70 struct dpaa2_sg_entry *sgt;
71 void *sg_vaddr;
72 int i;
73
74 /* If single buffer frame, just free the data buffer */
75 if (fd_format == dpaa2_fd_single)
76 goto free_buf;
77 else if (fd_format != dpaa2_fd_sg)
78 /* We don't support any other format */
79 return;
80
Ioana Radulescu729d79b2017-10-11 08:29:48 -050081 /* For S/G frames, we first need to free all SG entries
82 * except the first one, which was taken care of already
83 */
Ioana Radulescu6e2387e2017-04-28 04:50:29 -050084 sgt = vaddr + dpaa2_fd_get_offset(fd);
Ioana Radulescu729d79b2017-10-11 08:29:48 -050085 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
Ioana Radulescu6e2387e2017-04-28 04:50:29 -050086 addr = dpaa2_sg_get_addr(&sgt[i]);
Ioana Radulescu08eb2392017-05-24 07:13:27 -050087 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -050088 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
89 DMA_FROM_DEVICE);
90
Ioana Radulescu6e2387e2017-04-28 04:50:29 -050091 skb_free_frag(sg_vaddr);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -050092 if (dpaa2_sg_is_final(&sgt[i]))
93 break;
94 }
95
96free_buf:
97 skb_free_frag(vaddr);
98}
99
100/* Build a linear skb based on a single-buffer frame descriptor */
101static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
102 struct dpaa2_eth_channel *ch,
103 const struct dpaa2_fd *fd,
104 void *fd_vaddr)
105{
106 struct sk_buff *skb = NULL;
107 u16 fd_offset = dpaa2_fd_get_offset(fd);
108 u32 fd_length = dpaa2_fd_get_len(fd);
109
Ioana Radulescucbb3ea42017-10-11 08:29:44 -0500110 ch->buf_count--;
111
Bogdan Purcareata4b2d9fe2017-10-29 08:20:43 +0000112 skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500113 if (unlikely(!skb))
114 return NULL;
115
116 skb_reserve(skb, fd_offset);
117 skb_put(skb, fd_length);
118
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500119 return skb;
120}
121
122/* Build a non-linear (fragmented) skb based on an S/G table */
123static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
124 struct dpaa2_eth_channel *ch,
125 struct dpaa2_sg_entry *sgt)
126{
127 struct sk_buff *skb = NULL;
128 struct device *dev = priv->net_dev->dev.parent;
129 void *sg_vaddr;
130 dma_addr_t sg_addr;
131 u16 sg_offset;
132 u32 sg_length;
133 struct page *page, *head_page;
134 int page_offset;
135 int i;
136
137 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
138 struct dpaa2_sg_entry *sge = &sgt[i];
139
140 /* NOTE: We only support SG entries in dpaa2_sg_single format,
141 * but this is the only format we may receive from HW anyway
142 */
143
144 /* Get the address and length from the S/G entry */
145 sg_addr = dpaa2_sg_get_addr(sge);
Ioana Radulescu08eb2392017-05-24 07:13:27 -0500146 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500147 dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
148 DMA_FROM_DEVICE);
149
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500150 sg_length = dpaa2_sg_get_len(sge);
151
152 if (i == 0) {
153 /* We build the skb around the first data buffer */
Bogdan Purcareata4b2d9fe2017-10-29 08:20:43 +0000154 skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
Ioana Radulescucbb3ea42017-10-11 08:29:44 -0500155 if (unlikely(!skb)) {
Ioana Radulescu729d79b2017-10-11 08:29:48 -0500156 /* Free the first SG entry now, since we already
157 * unmapped it and obtained the virtual address
158 */
159 skb_free_frag(sg_vaddr);
160
Ioana Radulescucbb3ea42017-10-11 08:29:44 -0500161 /* We still need to subtract the buffers used
162 * by this FD from our software counter
163 */
164 while (!dpaa2_sg_is_final(&sgt[i]) &&
165 i < DPAA2_ETH_MAX_SG_ENTRIES)
166 i++;
167 break;
168 }
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500169
170 sg_offset = dpaa2_sg_get_offset(sge);
171 skb_reserve(skb, sg_offset);
172 skb_put(skb, sg_length);
173 } else {
174 /* Rest of the data buffers are stored as skb frags */
175 page = virt_to_page(sg_vaddr);
176 head_page = virt_to_head_page(sg_vaddr);
177
178 /* Offset in page (which may be compound).
179 * Data in subsequent SG entries is stored from the
180 * beginning of the buffer, so we don't need to add the
181 * sg_offset.
182 */
183 page_offset = ((unsigned long)sg_vaddr &
184 (PAGE_SIZE - 1)) +
185 (page_address(page) - page_address(head_page));
186
187 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
188 sg_length, DPAA2_ETH_RX_BUF_SIZE);
189 }
190
191 if (dpaa2_sg_is_final(sge))
192 break;
193 }
194
Ioana Radulescub63baf72017-10-11 08:29:45 -0500195 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
196
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500197 /* Count all data buffers + SG table buffer */
198 ch->buf_count -= i + 2;
199
200 return skb;
201}
202
203/* Main Rx frame processing routine */
204static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
205 struct dpaa2_eth_channel *ch,
206 const struct dpaa2_fd *fd,
Ioana Radulescu537336c2017-12-21 06:33:20 -0600207 struct napi_struct *napi,
208 u16 queue_id)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500209{
210 dma_addr_t addr = dpaa2_fd_get_addr(fd);
211 u8 fd_format = dpaa2_fd_get_format(fd);
212 void *vaddr;
213 struct sk_buff *skb;
214 struct rtnl_link_stats64 *percpu_stats;
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500215 struct dpaa2_eth_drv_stats *percpu_extras;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500216 struct device *dev = priv->net_dev->dev.parent;
217 struct dpaa2_fas *fas;
Ioana Radulescud695e762017-06-06 10:00:35 -0500218 void *buf_data;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500219 u32 status = 0;
220
Ioana Radulescu56361872017-04-28 04:50:32 -0500221 /* Tracing point */
222 trace_dpaa2_rx_fd(priv->net_dev, fd);
223
Ioana Radulescu08eb2392017-05-24 07:13:27 -0500224 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500225 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500226
Ioana Radulescu54ce8912017-12-08 06:47:53 -0600227 fas = dpaa2_get_fas(vaddr, false);
Ioana Radulescud695e762017-06-06 10:00:35 -0500228 prefetch(fas);
229 buf_data = vaddr + dpaa2_fd_get_offset(fd);
230 prefetch(buf_data);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500231
232 percpu_stats = this_cpu_ptr(priv->percpu_stats);
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500233 percpu_extras = this_cpu_ptr(priv->percpu_extras);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500234
235 if (fd_format == dpaa2_fd_single) {
236 skb = build_linear_skb(priv, ch, fd, vaddr);
237 } else if (fd_format == dpaa2_fd_sg) {
Ioana Radulescud695e762017-06-06 10:00:35 -0500238 skb = build_frag_skb(priv, ch, buf_data);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500239 skb_free_frag(vaddr);
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500240 percpu_extras->rx_sg_frames++;
241 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500242 } else {
243 /* We don't support any other format */
244 goto err_frame_format;
245 }
246
247 if (unlikely(!skb))
248 goto err_build_skb;
249
250 prefetch(skb->data);
251
Ioana Radulescu859f9982018-04-26 18:23:47 +0800252 /* Get the timestamp value */
253 if (priv->rx_tstamp) {
254 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
255 __le64 *ts = dpaa2_get_ts(vaddr, false);
256 u64 ns;
257
258 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
259
260 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
261 shhwtstamps->hwtstamp = ns_to_ktime(ns);
262 }
263
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500264 /* Check if we need to validate the L4 csum */
265 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500266 status = le32_to_cpu(fas->status);
267 validate_rx_csum(priv, status, skb);
268 }
269
270 skb->protocol = eth_type_trans(skb, priv->net_dev);
Ioana Radulescu537336c2017-12-21 06:33:20 -0600271 skb_record_rx_queue(skb, queue_id);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500272
273 percpu_stats->rx_packets++;
274 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
275
Ioana Radulescud4b37632017-06-06 10:00:31 -0500276 napi_gro_receive(napi, skb);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500277
278 return;
279
280err_build_skb:
281 free_rx_fd(priv, fd, vaddr);
282err_frame_format:
283 percpu_stats->rx_dropped++;
284}
285
286/* Consume all frames pull-dequeued into the store. This is the simplest way to
287 * make sure we don't accidentally issue another volatile dequeue which would
288 * overwrite (leak) frames already in the store.
289 *
290 * Observance of NAPI budget is not our concern, leaving that to the caller.
291 */
292static int consume_frames(struct dpaa2_eth_channel *ch)
293{
294 struct dpaa2_eth_priv *priv = ch->priv;
295 struct dpaa2_eth_fq *fq;
296 struct dpaa2_dq *dq;
297 const struct dpaa2_fd *fd;
298 int cleaned = 0;
299 int is_last;
300
301 do {
302 dq = dpaa2_io_store_next(ch->store, &is_last);
303 if (unlikely(!dq)) {
304 /* If we're here, we *must* have placed a
305 * volatile dequeue command, so keep reading through
306 * the store until we get some sort of valid response
307 * token (either a valid frame or an "empty dequeue")
308 */
309 continue;
310 }
311
312 fd = dpaa2_dq_fd(dq);
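		/* The FQD context holds a pointer back to our FQ structure */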
Ioana Radulescu75c583a2018-02-26 10:28:06 -0600313 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500314 fq->stats.frames++;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500315
Ioana Radulescu537336c2017-12-21 06:33:20 -0600316 fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500317 cleaned++;
318 } while (!is_last);
319
320 return cleaned;
321}
322
Ioana Radulescu859f9982018-04-26 18:23:47 +0800323/* Configure the egress frame annotation for timestamp update */
324static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
325{
326 struct dpaa2_faead *faead;
327 u32 ctrl, frc;
328
329 /* Mark the egress frame annotation area as valid */
330 frc = dpaa2_fd_get_frc(fd);
331 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
332
333 /* Set hardware annotation size */
334 ctrl = dpaa2_fd_get_ctrl(fd);
335 dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
336
337 /* enable UPD (update prepended data) bit in FAEAD field of
338 * hardware frame annotation area
339 */
340 ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
341 faead = dpaa2_get_faead(buf_start, true);
342 faead->ctrl = cpu_to_le32(ctrl);
343}
344
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500345/* Create a frame descriptor based on a fragmented skb */
346static int build_sg_fd(struct dpaa2_eth_priv *priv,
347 struct sk_buff *skb,
348 struct dpaa2_fd *fd)
349{
350 struct device *dev = priv->net_dev->dev.parent;
351 void *sgt_buf = NULL;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500352 dma_addr_t addr;
353 int nr_frags = skb_shinfo(skb)->nr_frags;
354 struct dpaa2_sg_entry *sgt;
355 int i, err;
356 int sgt_buf_size;
357 struct scatterlist *scl, *crt_scl;
358 int num_sg;
359 int num_dma_bufs;
360 struct dpaa2_eth_swa *swa;
361
362 /* Create and map scatterlist.
363 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
364 * to go beyond nr_frags+1.
365 * Note: We don't support chained scatterlists
366 */
367 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
368 return -EINVAL;
369
370 scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
371 if (unlikely(!scl))
372 return -ENOMEM;
373
374 sg_init_table(scl, nr_frags + 1);
375 num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
Ioana Radulescu1e5fa9e2017-05-24 07:13:28 -0500376 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500377 if (unlikely(!num_dma_bufs)) {
378 err = -ENOMEM;
379 goto dma_map_sg_failed;
380 }
381
382 /* Prepare the HW SGT structure */
383 sgt_buf_size = priv->tx_data_offset +
Ioana Radulescufa722c02018-03-23 08:44:12 -0500384 sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
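	/* Allocate with extra room so the SGT start can be aligned below */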
Ioana Radulescu6a9bbe52018-03-14 15:04:51 -0500385 sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500386 if (unlikely(!sgt_buf)) {
387 err = -ENOMEM;
388 goto sgt_buf_alloc_failed;
389 }
390 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
Ioana Radulescu6a9bbe52018-03-14 15:04:51 -0500391 memset(sgt_buf, 0, sgt_buf_size);
392
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500393 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
394
395 /* Fill in the HW SGT structure.
396 *
397 * sgt_buf is zeroed out, so the following fields are implicit
398 * in all sgt entries:
399 * - offset is 0
400 * - format is 'dpaa2_sg_single'
401 */
402 for_each_sg(scl, crt_scl, num_dma_bufs, i) {
403 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
404 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
405 }
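	/* Mark the last entry so hardware knows where the SG table ends */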
406 dpaa2_sg_set_final(&sgt[i - 1], true);
407
408 /* Store the skb backpointer in the SGT buffer.
409 * Fit the scatterlist and the number of buffers alongside the
410 * skb backpointer in the software annotation area. We'll need
411 * all of them on Tx Conf.
412 */
413 swa = (struct dpaa2_eth_swa *)sgt_buf;
414 swa->skb = skb;
415 swa->scl = scl;
416 swa->num_sg = num_sg;
Ioana Radulescub2718e62018-03-23 08:44:11 -0500417 swa->sgt_size = sgt_buf_size;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500418
419 /* Separately map the SGT buffer */
Ioana Radulescu1e5fa9e2017-05-24 07:13:28 -0500420 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500421 if (unlikely(dma_mapping_error(dev, addr))) {
422 err = -ENOMEM;
423 goto dma_map_single_failed;
424 }
425 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
426 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
427 dpaa2_fd_set_addr(fd, addr);
428 dpaa2_fd_set_len(fd, skb->len);
Horia Geantă58ad0d02018-07-24 09:21:28 -0500429 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500430
Ioana Radulescu859f9982018-04-26 18:23:47 +0800431 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
432 enable_tx_tstamp(fd, sgt_buf);
433
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500434 return 0;
435
436dma_map_single_failed:
Ioana Radulescu6a9bbe52018-03-14 15:04:51 -0500437 skb_free_frag(sgt_buf);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500438sgt_buf_alloc_failed:
Ioana Radulescu1e5fa9e2017-05-24 07:13:28 -0500439 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500440dma_map_sg_failed:
441 kfree(scl);
442 return err;
443}
444
445/* Create a frame descriptor based on a linear skb */
446static int build_single_fd(struct dpaa2_eth_priv *priv,
447 struct sk_buff *skb,
448 struct dpaa2_fd *fd)
449{
450 struct device *dev = priv->net_dev->dev.parent;
Ioana Radulescuc1636852017-12-08 06:47:58 -0600451 u8 *buffer_start, *aligned_start;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500452 struct sk_buff **skbh;
453 dma_addr_t addr;
454
Ioana Radulescuc1636852017-12-08 06:47:58 -0600455 buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
456
457 /* If there's enough room to align the FD address, do it.
458 * It will help hardware optimize accesses.
459 */
460 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
461 DPAA2_ETH_TX_BUF_ALIGN);
462 if (aligned_start >= skb->head)
463 buffer_start = aligned_start;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500464
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500465 /* Store a backpointer to the skb at the beginning of the buffer
466 * (in the private data area) such that we can release it
467 * on Tx confirm
468 */
469 skbh = (struct sk_buff **)buffer_start;
470 *skbh = skb;
471
472 addr = dma_map_single(dev, buffer_start,
473 skb_tail_pointer(skb) - buffer_start,
Ioana Radulescu1e5fa9e2017-05-24 07:13:28 -0500474 DMA_BIDIRECTIONAL);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500475 if (unlikely(dma_mapping_error(dev, addr)))
476 return -ENOMEM;
477
478 dpaa2_fd_set_addr(fd, addr);
479 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
480 dpaa2_fd_set_len(fd, skb->len);
481 dpaa2_fd_set_format(fd, dpaa2_fd_single);
Horia Geantă58ad0d02018-07-24 09:21:28 -0500482 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500483
Ioana Radulescu859f9982018-04-26 18:23:47 +0800484 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
485 enable_tx_tstamp(fd, buffer_start);
486
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500487 return 0;
488}
489
490/* FD freeing routine on the Tx path
491 *
492 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
493 * back-pointed to is also freed.
494 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
495 * dpaa2_eth_tx().
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500496 */
497static void free_tx_fd(const struct dpaa2_eth_priv *priv,
Ioana Radulescu2b7c86e2017-12-08 06:47:56 -0600498 const struct dpaa2_fd *fd)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500499{
500 struct device *dev = priv->net_dev->dev.parent;
501 dma_addr_t fd_addr;
502 struct sk_buff **skbh, *skb;
503 unsigned char *buffer_start;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500504 struct dpaa2_eth_swa *swa;
505 u8 fd_format = dpaa2_fd_get_format(fd);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500506
507 fd_addr = dpaa2_fd_get_addr(fd);
Ioana Radulescu08eb2392017-05-24 07:13:27 -0500508 skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500509
510 if (fd_format == dpaa2_fd_single) {
511 skb = *skbh;
512 buffer_start = (unsigned char *)skbh;
513 /* Accessing the skb buffer is safe before dma unmap, because
514 * we didn't map the actual skb shell.
515 */
516 dma_unmap_single(dev, fd_addr,
517 skb_tail_pointer(skb) - buffer_start,
Ioana Radulescu1e5fa9e2017-05-24 07:13:28 -0500518 DMA_BIDIRECTIONAL);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500519 } else if (fd_format == dpaa2_fd_sg) {
520 swa = (struct dpaa2_eth_swa *)skbh;
521 skb = swa->skb;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500522
523 /* Unmap the scatterlist */
Ioana Radulescub2718e62018-03-23 08:44:11 -0500524 dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL);
525 kfree(swa->scl);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500526
527 /* Unmap the SGT buffer */
Ioana Radulescub2718e62018-03-23 08:44:11 -0500528 dma_unmap_single(dev, fd_addr, swa->sgt_size,
529 DMA_BIDIRECTIONAL);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500530 } else {
Ioana Radulescu2b7c86e2017-12-08 06:47:56 -0600531 netdev_dbg(priv->net_dev, "Invalid FD format\n");
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500532 return;
533 }
534
Ioana Radulescu859f9982018-04-26 18:23:47 +0800535 /* Get the timestamp value */
536 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
537 struct skb_shared_hwtstamps shhwtstamps;
538 __le64 *ts = dpaa2_get_ts(skbh, true);
539 u64 ns;
540
541 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
542
543 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
544 shhwtstamps.hwtstamp = ns_to_ktime(ns);
545 skb_tstamp_tx(skb, &shhwtstamps);
546 }
547
Ioana Radulescu6a9bbe52018-03-14 15:04:51 -0500548 /* Free SGT buffer allocated on tx */
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500549 if (fd_format != dpaa2_fd_single)
Ioana Radulescu6a9bbe52018-03-14 15:04:51 -0500550 skb_free_frag(skbh);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500551
552 /* Move on with skb release */
553 dev_kfree_skb(skb);
554}
555
Ioana Radulescuc433db42017-06-06 10:00:26 -0500556static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500557{
558 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
559 struct dpaa2_fd fd;
560 struct rtnl_link_stats64 *percpu_stats;
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500561 struct dpaa2_eth_drv_stats *percpu_extras;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500562 struct dpaa2_eth_fq *fq;
563 u16 queue_mapping;
Ioana Radulescu18c21462017-12-08 06:47:57 -0600564 unsigned int needed_headroom;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500565 int err, i;
566
567 percpu_stats = this_cpu_ptr(priv->percpu_stats);
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500568 percpu_extras = this_cpu_ptr(priv->percpu_extras);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500569
Ioana Radulescu18c21462017-12-08 06:47:57 -0600570 needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
571 if (skb_headroom(skb) < needed_headroom) {
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500572 struct sk_buff *ns;
573
Ioana Radulescu18c21462017-12-08 06:47:57 -0600574 ns = skb_realloc_headroom(skb, needed_headroom);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500575 if (unlikely(!ns)) {
576 percpu_stats->tx_dropped++;
577 goto err_alloc_headroom;
578 }
Ioana Radulescu6662b5e2017-12-08 06:47:55 -0600579 percpu_extras->tx_reallocs++;
Ioana Radulescu859f9982018-04-26 18:23:47 +0800580
581 if (skb->sk)
582 skb_set_owner_w(ns, skb->sk);
583
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500584 dev_kfree_skb(skb);
585 skb = ns;
586 }
587
588 /* We'll be holding a back-reference to the skb until Tx Confirmation;
589 * we don't want that overwritten by a concurrent Tx with a cloned skb.
590 */
591 skb = skb_unshare(skb, GFP_ATOMIC);
592 if (unlikely(!skb)) {
593 /* skb_unshare() has already freed the skb */
594 percpu_stats->tx_dropped++;
595 return NETDEV_TX_OK;
596 }
597
598 /* Setup the FD fields */
599 memset(&fd, 0, sizeof(fd));
600
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500601 if (skb_is_nonlinear(skb)) {
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500602 err = build_sg_fd(priv, skb, &fd);
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500603 percpu_extras->tx_sg_frames++;
604 percpu_extras->tx_sg_bytes += skb->len;
605 } else {
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500606 err = build_single_fd(priv, skb, &fd);
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500607 }
608
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500609 if (unlikely(err)) {
610 percpu_stats->tx_dropped++;
611 goto err_build_fd;
612 }
613
Ioana Radulescu56361872017-04-28 04:50:32 -0500614 /* Tracing point */
615 trace_dpaa2_tx_fd(net_dev, &fd);
616
Ioana Radulescu537336c2017-12-21 06:33:20 -0600617 /* TxConf FQ selection relies on queue id from the stack.
618 * In case of a forwarded frame from another DPNI interface, we choose
619 * a queue affined to the same core that processed the Rx frame
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500620 */
Ioana Radulescu537336c2017-12-21 06:33:20 -0600621 queue_mapping = skb_get_queue_mapping(skb);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500622 fq = &priv->fq[queue_mapping];
623 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
Ioana Radulescu7ec05962018-01-05 05:04:32 -0600624 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
625 priv->tx_qdid, 0,
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500626 fq->tx_qdbin, &fd);
627 if (err != -EBUSY)
628 break;
629 }
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500630 percpu_extras->tx_portal_busy += i;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500631 if (unlikely(err < 0)) {
632 percpu_stats->tx_errors++;
633 /* Clean up everything, including freeing the skb */
Ioana Radulescu2b7c86e2017-12-08 06:47:56 -0600634 free_tx_fd(priv, &fd);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500635 } else {
636 percpu_stats->tx_packets++;
Bogdan Purcareata3c70d952017-07-20 10:58:37 +0000637 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500638 }
639
640 return NETDEV_TX_OK;
641
642err_build_fd:
643err_alloc_headroom:
644 dev_kfree_skb(skb);
645
646 return NETDEV_TX_OK;
647}
648
649/* Tx confirmation frame processing routine */
650static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
651 struct dpaa2_eth_channel *ch,
652 const struct dpaa2_fd *fd,
Ioana Radulescu537336c2017-12-21 06:33:20 -0600653 struct napi_struct *napi __always_unused,
654 u16 queue_id __always_unused)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500655{
656 struct rtnl_link_stats64 *percpu_stats;
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500657 struct dpaa2_eth_drv_stats *percpu_extras;
Ioana Radulescu39163c02017-06-06 10:00:39 -0500658 u32 fd_errors;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500659
Ioana Radulescu56361872017-04-28 04:50:32 -0500660 /* Tracing point */
661 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
662
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500663 percpu_extras = this_cpu_ptr(priv->percpu_extras);
664 percpu_extras->tx_conf_frames++;
665 percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
666
Ioana Radulescu39163c02017-06-06 10:00:39 -0500667 /* Check frame errors in the FD field */
668 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
Ioana Radulescu2b7c86e2017-12-08 06:47:56 -0600669 free_tx_fd(priv, fd);
Ioana Radulescu39163c02017-06-06 10:00:39 -0500670
671 if (likely(!fd_errors))
672 return;
673
Ioana Radulescu2b7c86e2017-12-08 06:47:56 -0600674 if (net_ratelimit())
675 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
676 fd_errors);
677
Ioana Radulescu39163c02017-06-06 10:00:39 -0500678 percpu_stats = this_cpu_ptr(priv->percpu_stats);
679 /* Tx-conf logically pertains to the egress path. */
680 percpu_stats->tx_errors++;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500681}
682
683static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
684{
685 int err;
686
687 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
688 DPNI_OFF_RX_L3_CSUM, enable);
689 if (err) {
690 netdev_err(priv->net_dev,
691 "dpni_set_offload(RX_L3_CSUM) failed\n");
692 return err;
693 }
694
695 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
696 DPNI_OFF_RX_L4_CSUM, enable);
697 if (err) {
698 netdev_err(priv->net_dev,
699 "dpni_set_offload(RX_L4_CSUM) failed\n");
700 return err;
701 }
702
703 return 0;
704}
705
706static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
707{
708 int err;
709
710 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
711 DPNI_OFF_TX_L3_CSUM, enable);
712 if (err) {
713 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
714 return err;
715 }
716
717 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
718 DPNI_OFF_TX_L4_CSUM, enable);
719 if (err) {
720 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
721 return err;
722 }
723
724 return 0;
725}
726
Ioana Radulescu87eb55e2017-10-11 08:29:43 -0500727/* Free buffers acquired from the buffer pool or which were meant to
728 * be released in the pool
729 */
730static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
731{
732 struct device *dev = priv->net_dev->dev.parent;
733 void *vaddr;
734 int i;
735
736 for (i = 0; i < count; i++) {
737 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
738 dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
Ioana Radulescu466bcdc2018-07-09 10:01:07 -0500739 DMA_FROM_DEVICE);
Ioana Radulescu87eb55e2017-10-11 08:29:43 -0500740 skb_free_frag(vaddr);
741 }
742}
743
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500744/* Perform a single release command to add buffers
745 * to the specified buffer pool
746 */
Ioana Radulescu7ec05962018-01-05 05:04:32 -0600747static int add_bufs(struct dpaa2_eth_priv *priv,
748 struct dpaa2_eth_channel *ch, u16 bpid)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500749{
750 struct device *dev = priv->net_dev->dev.parent;
751 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
752 void *buf;
753 dma_addr_t addr;
Ioana Radulescu87eb55e2017-10-11 08:29:43 -0500754 int i, err;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500755
756 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
757 /* Allocate buffer visible to WRIOP + skb shared info +
758 * alignment padding
759 */
Bogdan Purcareata8a4fd872017-10-29 08:20:42 +0000760 buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500761 if (unlikely(!buf))
762 goto err_alloc;
763
Bogdan Purcareata8a4fd872017-10-29 08:20:42 +0000764 buf = PTR_ALIGN(buf, priv->rx_buf_align);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500765
766 addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
767 DMA_FROM_DEVICE);
768 if (unlikely(dma_mapping_error(dev, addr)))
769 goto err_map;
770
771 buf_array[i] = addr;
Ioana Radulescu56361872017-04-28 04:50:32 -0500772
773 /* tracing point */
774 trace_dpaa2_eth_buf_seed(priv->net_dev,
Bogdan Purcareata8a4fd872017-10-29 08:20:42 +0000775 buf, dpaa2_eth_buf_raw_size(priv),
Ioana Radulescu56361872017-04-28 04:50:32 -0500776 addr, DPAA2_ETH_RX_BUF_SIZE,
777 bpid);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500778 }
779
780release_bufs:
Ioana Radulescu87eb55e2017-10-11 08:29:43 -0500781 /* In case the portal is busy, retry until successful */
Ioana Radulescu7ec05962018-01-05 05:04:32 -0600782 while ((err = dpaa2_io_service_release(ch->dpio, bpid,
Ioana Radulescu87eb55e2017-10-11 08:29:43 -0500783 buf_array, i)) == -EBUSY)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500784 cpu_relax();
Ioana Radulescu87eb55e2017-10-11 08:29:43 -0500785
786 /* If release command failed, clean up and bail out;
787 * not much else we can do about it
788 */
789 if (err) {
790 free_bufs(priv, buf_array, i);
791 return 0;
792 }
793
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500794 return i;
795
796err_map:
797 skb_free_frag(buf);
798err_alloc:
Ioana Radulescu87eb55e2017-10-11 08:29:43 -0500799 /* If we managed to allocate at least some buffers,
800 * release them to hardware
801 */
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500802 if (i)
803 goto release_bufs;
804
805 return 0;
806}
807
808static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
809{
810 int i, j;
811 int new_count;
812
813 /* This is the lazy seeding of Rx buffer pools.
814 * add_bufs() is also used on the Rx hotpath and calls
815 * napi_alloc_frag(). The trouble with that is that it in turn ends up
816 * calling this_cpu_ptr(), which mandates execution in atomic context.
817 * Rather than splitting up the code, do a one-off preempt disable.
818 */
819 preempt_disable();
820 for (j = 0; j < priv->num_channels; j++) {
821 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
822 i += DPAA2_ETH_BUFS_PER_CMD) {
Ioana Radulescu7ec05962018-01-05 05:04:32 -0600823 new_count = add_bufs(priv, priv->channel[j], bpid);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500824 priv->channel[j]->buf_count += new_count;
825
826 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
827 preempt_enable();
828 return -ENOMEM;
829 }
830 }
831 }
832 preempt_enable();
833
834 return 0;
835}
836
837/**
838 * Drain the specified number of buffers from the DPNI's private buffer pool.
839 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
840 */
841static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
842{
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500843 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
Ioana Radulescu87eb55e2017-10-11 08:29:43 -0500844 int ret;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500845
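	/* Keep acquiring buffers until the pool reports it is empty */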
846 do {
Ioana Radulescu05fa39c2017-06-06 10:00:37 -0500847 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500848 buf_array, count);
849 if (ret < 0) {
850 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
851 return;
852 }
Ioana Radulescu87eb55e2017-10-11 08:29:43 -0500853 free_bufs(priv, buf_array, ret);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500854 } while (ret);
855}
856
857static void drain_pool(struct dpaa2_eth_priv *priv)
858{
859 int i;
860
861 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
862 drain_bufs(priv, 1);
863
864 for (i = 0; i < priv->num_channels; i++)
865 priv->channel[i]->buf_count = 0;
866}
867
868/* Function is called from softirq context only, so we don't need to guard
869 * the access to percpu count
870 */
871static int refill_pool(struct dpaa2_eth_priv *priv,
872 struct dpaa2_eth_channel *ch,
873 u16 bpid)
874{
875 int new_count;
876
877 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
878 return 0;
879
880 do {
Ioana Radulescu7ec05962018-01-05 05:04:32 -0600881 new_count = add_bufs(priv, ch, bpid);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500882 if (unlikely(!new_count)) {
883 /* Out of memory; abort for now, we'll try later on */
884 break;
885 }
886 ch->buf_count += new_count;
887 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
888
889 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
890 return -ENOMEM;
891
892 return 0;
893}
894
895static int pull_channel(struct dpaa2_eth_channel *ch)
896{
897 int err;
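	/* Start at -1 so a first-try success counts as zero busy retries */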
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500898 int dequeues = -1;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500899
900 /* Retry while portal is busy */
901 do {
Ioana Radulescu7ec05962018-01-05 05:04:32 -0600902 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
903 ch->store);
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500904 dequeues++;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500905 cpu_relax();
906 } while (err == -EBUSY);
907
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500908 ch->stats.dequeue_portal_busy += dequeues;
909 if (unlikely(err))
910 ch->stats.pull_err++;
911
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500912 return err;
913}
914
915/* NAPI poll routine
916 *
917 * Frames are dequeued from the QMan channel associated with this NAPI context.
918 * Rx, Tx confirmation and (if configured) Rx error frames all count
919 * towards the NAPI budget.
920 */
921static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
922{
923 struct dpaa2_eth_channel *ch;
924 int cleaned = 0, store_cleaned;
925 struct dpaa2_eth_priv *priv;
926 int err;
927
928 ch = container_of(napi, struct dpaa2_eth_channel, napi);
929 priv = ch->priv;
930
931 while (cleaned < budget) {
932 err = pull_channel(ch);
933 if (unlikely(err))
934 break;
935
936 /* Refill pool if appropriate */
Ioana Radulescu05fa39c2017-06-06 10:00:37 -0500937 refill_pool(priv, ch, priv->bpid);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500938
939 store_cleaned = consume_frames(ch);
940 cleaned += store_cleaned;
941
942 /* If we have enough budget left for a full store,
943 * try a new pull dequeue, otherwise we're done here
944 */
945 if (store_cleaned == 0 ||
946 cleaned > budget - DPAA2_ETH_STORE_SIZE)
947 break;
948 }
949
Ioana Radulescue30bd4f2017-10-11 08:29:47 -0500950 if (cleaned < budget && napi_complete_done(napi, cleaned)) {
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500951 /* Re-enable data available notifications */
952 do {
Ioana Radulescu7ec05962018-01-05 05:04:32 -0600953 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500954 cpu_relax();
955 } while (err == -EBUSY);
Ioana Radulescu86580942017-10-11 08:29:46 -0500956 WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
957 ch->nctx.desired_cpu);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500958 }
959
Ioana Radulescu85047ab2017-04-28 04:50:31 -0500960 ch->stats.frames += cleaned;
961
Ioana Radulescu6e2387e2017-04-28 04:50:29 -0500962 return cleaned;
963}
964
965static void enable_ch_napi(struct dpaa2_eth_priv *priv)
966{
967 struct dpaa2_eth_channel *ch;
968 int i;
969
970 for (i = 0; i < priv->num_channels; i++) {
971 ch = priv->channel[i];
972 napi_enable(&ch->napi);
973 }
974}
975
976static void disable_ch_napi(struct dpaa2_eth_priv *priv)
977{
978 struct dpaa2_eth_channel *ch;
979 int i;
980
981 for (i = 0; i < priv->num_channels; i++) {
982 ch = priv->channel[i];
983 napi_disable(&ch->napi);
984 }
985}
986
987static int link_state_update(struct dpaa2_eth_priv *priv)
988{
989 struct dpni_link_state state;
990 int err;
991
992 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
993 if (unlikely(err)) {
994 netdev_err(priv->net_dev,
995 "dpni_get_link_state() failed\n");
996 return err;
997 }
998
999 /* Check link state; speed/duplex changes are not treated yet */
1000 if (priv->link_state.up == state.up)
1001 return 0;
1002
1003 priv->link_state = state;
1004 if (state.up) {
1005 netif_carrier_on(priv->net_dev);
1006 netif_tx_start_all_queues(priv->net_dev);
1007 } else {
1008 netif_tx_stop_all_queues(priv->net_dev);
1009 netif_carrier_off(priv->net_dev);
1010 }
1011
Ioana Radulescu77160af2017-06-06 10:00:28 -05001012 netdev_info(priv->net_dev, "Link Event: state %s\n",
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001013 state.up ? "up" : "down");
1014
1015 return 0;
1016}
1017
1018static int dpaa2_eth_open(struct net_device *net_dev)
1019{
1020 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1021 int err;
1022
Ioana Radulescu05fa39c2017-06-06 10:00:37 -05001023 err = seed_pool(priv, priv->bpid);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001024 if (err) {
1025 /* Not much to do; the buffer pool, though not filled up,
1026 * may still contain some buffers which would enable us
1027 * to limp on.
1028 */
1029 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
Ioana Radulescu05fa39c2017-06-06 10:00:37 -05001030 priv->dpbp_dev->obj_desc.id, priv->bpid);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001031 }
1032
1033 /* We'll only start the txqs when the link is actually ready; make sure
1034 * we don't race against the link up notification, which may come
1035 * immediately after dpni_enable();
1036 */
1037 netif_tx_stop_all_queues(net_dev);
1038 enable_ch_napi(priv);
1039 /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
1040 * return true and cause 'ip link show' to report the LOWER_UP flag,
1041 * even though the link notification wasn't even received.
1042 */
1043 netif_carrier_off(net_dev);
1044
1045 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1046 if (err < 0) {
1047 netdev_err(net_dev, "dpni_enable() failed\n");
1048 goto enable_err;
1049 }
1050
1051 /* If the DPMAC object has already processed the link up interrupt,
1052 * we have to learn the link state ourselves.
1053 */
1054 err = link_state_update(priv);
1055 if (err < 0) {
1056 netdev_err(net_dev, "Can't update link state\n");
1057 goto link_state_err;
1058 }
1059
1060 return 0;
1061
1062link_state_err:
1063enable_err:
1064 disable_ch_napi(priv);
1065 drain_pool(priv);
1066 return err;
1067}
1068
1069/* The DPIO store must be empty when we call this,
1070 * at the end of every NAPI cycle.
1071 */
1072static u32 drain_channel(struct dpaa2_eth_priv *priv,
1073 struct dpaa2_eth_channel *ch)
1074{
1075 u32 drained = 0, total = 0;
1076
1077 do {
1078 pull_channel(ch);
1079 drained = consume_frames(ch);
1080 total += drained;
1081 } while (drained);
1082
1083 return total;
1084}
1085
1086static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
1087{
1088 struct dpaa2_eth_channel *ch;
1089 int i;
1090 u32 drained = 0;
1091
1092 for (i = 0; i < priv->num_channels; i++) {
1093 ch = priv->channel[i];
1094 drained += drain_channel(priv, ch);
1095 }
1096
1097 return drained;
1098}
1099
1100static int dpaa2_eth_stop(struct net_device *net_dev)
1101{
1102 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1103 int dpni_enabled;
1104 int retries = 10;
1105 u32 drained;
1106
1107 netif_tx_stop_all_queues(net_dev);
1108 netif_carrier_off(net_dev);
1109
1110 /* Loop while dpni_disable() attempts to drain the egress FQs
1111 * and confirm them back to us.
1112 */
1113 do {
1114 dpni_disable(priv->mc_io, 0, priv->mc_token);
1115 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1116 if (dpni_enabled)
1117 /* Allow the hardware some slack */
1118 msleep(100);
1119 } while (dpni_enabled && --retries);
1120 if (!retries) {
1121 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1122 /* Must go on and disable NAPI nonetheless, so we don't crash at
1123 * the next "ifconfig up"
1124 */
1125 }
1126
1127 /* Wait for NAPI to complete on every core and disable it.
1128 * In particular, this will also prevent NAPI from being rescheduled if
1129 * a new CDAN is serviced, effectively discarding the CDAN. We therefore
1130 * don't even need to disarm the channels, except perhaps for the case
1131 * of a huge coalescing value.
1132 */
1133 disable_ch_napi(priv);
1134
1135 /* Manually drain the Rx and TxConf queues */
1136 drained = drain_ingress_frames(priv);
1137 if (drained)
1138 netdev_dbg(net_dev, "Drained %d frames.\n", drained);
1139
1140 /* Empty the buffer pool */
1141 drain_pool(priv);
1142
1143 return 0;
1144}
1145
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001146static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1147{
1148 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1149 struct device *dev = net_dev->dev.parent;
1150 int err;
1151
1152 err = eth_mac_addr(net_dev, addr);
1153 if (err < 0) {
1154 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1155 return err;
1156 }
1157
1158 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1159 net_dev->dev_addr);
1160 if (err) {
1161 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1162 return err;
1163 }
1164
1165 return 0;
1166}
1167
1168/** Fill in counters maintained by the GPP driver. These may be different from
1169 * the hardware counters obtained by ethtool.
1170 */
Ioana Radulescuacbff8e2017-06-06 10:00:24 -05001171static void dpaa2_eth_get_stats(struct net_device *net_dev,
1172 struct rtnl_link_stats64 *stats)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001173{
1174 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1175 struct rtnl_link_stats64 *percpu_stats;
1176 u64 *cpustats;
1177 u64 *netstats = (u64 *)stats;
1178 int i, j;
1179 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1180
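	/* Add up the per-CPU copies of each counter; the stats structs are
	 * walked as flat arrays of u64 fields.
	 */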
1181 for_each_possible_cpu(i) {
1182 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1183 cpustats = (u64 *)percpu_stats;
1184 for (j = 0; j < num; j++)
1185 netstats[j] += cpustats[j];
1186 }
1187}
1188
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001189/* Copy mac unicast addresses from @net_dev to @priv.
1190 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1191 */
1192static void add_uc_hw_addr(const struct net_device *net_dev,
1193 struct dpaa2_eth_priv *priv)
1194{
1195 struct netdev_hw_addr *ha;
1196 int err;
1197
1198 netdev_for_each_uc_addr(ha, net_dev) {
1199 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1200 ha->addr);
1201 if (err)
1202 netdev_warn(priv->net_dev,
1203 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1204 ha->addr, err);
1205 }
1206}
1207
1208/* Copy mac multicast addresses from @net_dev to @priv
1209 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1210 */
1211static void add_mc_hw_addr(const struct net_device *net_dev,
1212 struct dpaa2_eth_priv *priv)
1213{
1214 struct netdev_hw_addr *ha;
1215 int err;
1216
1217 netdev_for_each_mc_addr(ha, net_dev) {
1218 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1219 ha->addr);
1220 if (err)
1221 netdev_warn(priv->net_dev,
1222 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1223 ha->addr, err);
1224 }
1225}
1226
1227static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1228{
1229 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1230 int uc_count = netdev_uc_count(net_dev);
1231 int mc_count = netdev_mc_count(net_dev);
1232 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1233 u32 options = priv->dpni_attrs.options;
1234 u16 mc_token = priv->mc_token;
1235 struct fsl_mc_io *mc_io = priv->mc_io;
1236 int err;
1237
1238 /* Basic sanity checks; these probably indicate a misconfiguration */
1239 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1240 netdev_info(net_dev,
1241 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1242 max_mac);
1243
1244 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1245 if (uc_count > max_mac) {
1246 netdev_info(net_dev,
1247 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1248 uc_count, max_mac);
1249 goto force_promisc;
1250 }
1251 if (mc_count + uc_count > max_mac) {
1252 netdev_info(net_dev,
1253 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1254 uc_count + mc_count, max_mac);
1255 goto force_mc_promisc;
1256 }
1257
1258 /* Adjust promisc settings due to flag combinations */
1259 if (net_dev->flags & IFF_PROMISC)
1260 goto force_promisc;
1261 if (net_dev->flags & IFF_ALLMULTI) {
1262 /* First, rebuild unicast filtering table. This should be done
1263 * in promisc mode, in order to avoid frame loss while we
1264 * progressively add entries to the table.
1265 * We don't know whether we had been in promisc already, and
1266 * making an MC call to find out is expensive; so set uc promisc
1267 * nonetheless.
1268 */
1269 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1270 if (err)
1271 netdev_warn(net_dev, "Can't set uc promisc\n");
1272
1273 /* Actual uc table reconstruction. */
1274 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1275 if (err)
1276 netdev_warn(net_dev, "Can't clear uc filters\n");
1277 add_uc_hw_addr(net_dev, priv);
1278
1279 /* Finally, clear uc promisc and set mc promisc as requested. */
1280 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1281 if (err)
1282 netdev_warn(net_dev, "Can't clear uc promisc\n");
1283 goto force_mc_promisc;
1284 }
1285
1286 /* Neither unicast, nor multicast promisc will be on... eventually.
1287 * For now, rebuild mac filtering tables while forcing both of them on.
1288 */
1289 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1290 if (err)
1291 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1292 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1293 if (err)
1294 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1295
1296 /* Actual mac filtering tables reconstruction */
1297 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1298 if (err)
1299 netdev_warn(net_dev, "Can't clear mac filters\n");
1300 add_mc_hw_addr(net_dev, priv);
1301 add_uc_hw_addr(net_dev, priv);
1302
1303 /* Now we can clear both ucast and mcast promisc, without risking
1304 * the loss of legitimate frames anymore.
1305 */
1306 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1307 if (err)
1308 netdev_warn(net_dev, "Can't clear ucast promisc\n");
1309 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1310 if (err)
1311 netdev_warn(net_dev, "Can't clear mcast promisc\n");
1312
1313 return;
1314
1315force_promisc:
1316 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1317 if (err)
1318 netdev_warn(net_dev, "Can't set ucast promisc\n");
1319force_mc_promisc:
1320 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1321 if (err)
1322 netdev_warn(net_dev, "Can't set mcast promisc\n");
1323}
1324
1325static int dpaa2_eth_set_features(struct net_device *net_dev,
1326 netdev_features_t features)
1327{
1328 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1329 netdev_features_t changed = features ^ net_dev->features;
1330 bool enable;
1331 int err;
1332
1333 if (changed & NETIF_F_RXCSUM) {
1334 enable = !!(features & NETIF_F_RXCSUM);
1335 err = set_rx_csum(priv, enable);
1336 if (err)
1337 return err;
1338 }
1339
1340 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1341 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1342 err = set_tx_csum(priv, enable);
1343 if (err)
1344 return err;
1345 }
1346
1347 return 0;
1348}
1349
Ioana Radulescu859f9982018-04-26 18:23:47 +08001350static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1351{
1352 struct dpaa2_eth_priv *priv = netdev_priv(dev);
1353 struct hwtstamp_config config;
1354
1355 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
1356 return -EFAULT;
1357
1358 switch (config.tx_type) {
1359 case HWTSTAMP_TX_OFF:
1360 priv->tx_tstamp = false;
1361 break;
1362 case HWTSTAMP_TX_ON:
1363 priv->tx_tstamp = true;
1364 break;
1365 default:
1366 return -ERANGE;
1367 }
1368
1369 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
1370 priv->rx_tstamp = false;
1371 } else {
1372 priv->rx_tstamp = true;
1373 /* TS is set for all frame types, not only those requested */
1374 config.rx_filter = HWTSTAMP_FILTER_ALL;
1375 }
1376
1377 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
1378 -EFAULT : 0;
1379}
1380
1381static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1382{
1383 if (cmd == SIOCSHWTSTAMP)
1384 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
1385
1386 return -EINVAL;
1387}
1388
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001389static const struct net_device_ops dpaa2_eth_ops = {
1390 .ndo_open = dpaa2_eth_open,
1391 .ndo_start_xmit = dpaa2_eth_tx,
1392 .ndo_stop = dpaa2_eth_stop,
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001393 .ndo_set_mac_address = dpaa2_eth_set_addr,
1394 .ndo_get_stats64 = dpaa2_eth_get_stats,
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001395 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1396 .ndo_set_features = dpaa2_eth_set_features,
Ioana Radulescu859f9982018-04-26 18:23:47 +08001397 .ndo_do_ioctl = dpaa2_eth_ioctl,
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001398};
1399
1400static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
1401{
1402 struct dpaa2_eth_channel *ch;
1403
1404 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
Ioana Radulescu85047ab2017-04-28 04:50:31 -05001405
1406 /* Update NAPI statistics */
1407 ch->stats.cdan++;
1408
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001409 napi_schedule_irqoff(&ch->napi);
1410}
1411
1412/* Allocate and configure a DPCON object */
1413static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
1414{
1415 struct fsl_mc_device *dpcon;
1416 struct device *dev = priv->net_dev->dev.parent;
1417 struct dpcon_attr attrs;
1418 int err;
1419
1420 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
1421 FSL_MC_POOL_DPCON, &dpcon);
1422 if (err) {
1423 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
1424 return NULL;
1425 }
1426
1427 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
1428 if (err) {
1429 dev_err(dev, "dpcon_open() failed\n");
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001430 goto free;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001431 }
1432
1433 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
1434 if (err) {
1435 dev_err(dev, "dpcon_reset() failed\n");
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001436 goto close;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001437 }
1438
1439 err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
1440 if (err) {
1441 dev_err(dev, "dpcon_get_attributes() failed\n");
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001442 goto close;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001443 }
1444
1445 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
1446 if (err) {
1447 dev_err(dev, "dpcon_enable() failed\n");
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001448 goto close;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001449 }
1450
1451 return dpcon;
1452
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001453close:
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001454 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001455free:
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001456 fsl_mc_object_free(dpcon);
1457
1458 return NULL;
1459}
1460
1461static void free_dpcon(struct dpaa2_eth_priv *priv,
1462 struct fsl_mc_device *dpcon)
1463{
1464 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
1465 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1466 fsl_mc_object_free(dpcon);
1467}
1468
1469static struct dpaa2_eth_channel *
1470alloc_channel(struct dpaa2_eth_priv *priv)
1471{
1472 struct dpaa2_eth_channel *channel;
1473 struct dpcon_attr attr;
1474 struct device *dev = priv->net_dev->dev.parent;
1475 int err;
1476
1477 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
1478 if (!channel)
1479 return NULL;
1480
1481 channel->dpcon = setup_dpcon(priv);
1482 if (!channel->dpcon)
1483 goto err_setup;
1484
1485 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
1486 &attr);
1487 if (err) {
1488 dev_err(dev, "dpcon_get_attributes() failed\n");
1489 goto err_get_attr;
1490 }
1491
1492 channel->dpcon_id = attr.id;
1493 channel->ch_id = attr.qbman_ch_id;
1494 channel->priv = priv;
1495
1496 return channel;
1497
1498err_get_attr:
1499 free_dpcon(priv, channel->dpcon);
1500err_setup:
1501 kfree(channel);
1502 return NULL;
1503}
1504
1505static void free_channel(struct dpaa2_eth_priv *priv,
1506 struct dpaa2_eth_channel *channel)
1507{
1508 free_dpcon(priv, channel->dpcon);
1509 kfree(channel);
1510}
1511
1512/* DPIO setup: allocate and configure QBMan channels, setup core affinity
1513 * and register data availability notifications
1514 */
1515static int setup_dpio(struct dpaa2_eth_priv *priv)
1516{
1517 struct dpaa2_io_notification_ctx *nctx;
1518 struct dpaa2_eth_channel *channel;
1519 struct dpcon_notification_cfg dpcon_notif_cfg;
1520 struct device *dev = priv->net_dev->dev.parent;
1521 int i, err;
1522
1523 /* We want the ability to spread ingress traffic (RX, TX conf) to as
1524 * many cores as possible, so we need one channel for each core
1525 * (unless there are fewer queues than cores, in which case the extra
1526 * channels would be wasted).
1527 * Allocate one channel per core and register it to the core's
1528 * affine DPIO. If not enough channels are available for all cores
1529 * or if some cores don't have an affine DPIO, there will be no
1530 * ingress frame processing on those cores.
1531 */
1532 cpumask_clear(&priv->dpio_cpumask);
1533 for_each_online_cpu(i) {
1534 /* Try to allocate a channel */
1535 channel = alloc_channel(priv);
1536 if (!channel) {
1537 dev_info(dev,
1538 "No affine channel for cpu %d and above\n", i);
Ioana Radulescu5206d8d2017-06-06 10:00:33 -05001539 err = -ENODEV;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001540 goto err_alloc_ch;
1541 }
1542
1543 priv->channel[priv->num_channels] = channel;
1544
1545 nctx = &channel->nctx;
1546 nctx->is_cdan = 1;
1547 nctx->cb = cdan_cb;
1548 nctx->id = channel->ch_id;
1549 nctx->desired_cpu = i;
1550
1551 /* Register the new context */
Ioana Radulescu7ec05962018-01-05 05:04:32 -06001552 channel->dpio = dpaa2_io_service_select(i);
1553 err = dpaa2_io_service_register(channel->dpio, nctx);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001554 if (err) {
Ioana Radulescu5206d8d2017-06-06 10:00:33 -05001555 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
			/* If there is no affine DPIO for this core, there is
			 * probably none available for the remaining cores
			 * either. Signal that we want to retry later, in
			 * case the DPIO devices weren't probed yet.
			 */
Ioana Radulescu5206d8d2017-06-06 10:00:33 -05001561 err = -EPROBE_DEFER;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001562 goto err_service_reg;
1563 }
1564
1565 /* Register DPCON notification with MC */
1566 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
1567 dpcon_notif_cfg.priority = 0;
1568 dpcon_notif_cfg.user_ctx = nctx->qman64;
1569 err = dpcon_set_notification(priv->mc_io, 0,
1570 channel->dpcon->mc_handle,
1571 &dpcon_notif_cfg);
1572 if (err) {
			dev_err(dev, "dpcon_set_notification() failed\n");
1574 goto err_set_cdan;
1575 }
1576
1577 /* If we managed to allocate a channel and also found an affine
1578 * DPIO for this core, add it to the final mask
1579 */
1580 cpumask_set_cpu(i, &priv->dpio_cpumask);
1581 priv->num_channels++;
1582
1583 /* Stop if we already have enough channels to accommodate all
1584 * RX and TX conf queues
1585 */
1586 if (priv->num_channels == dpaa2_eth_queue_count(priv))
1587 break;
1588 }
1589
1590 return 0;
1591
1592err_set_cdan:
Ioana Radulescu7ec05962018-01-05 05:04:32 -06001593 dpaa2_io_service_deregister(channel->dpio, nctx);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001594err_service_reg:
1595 free_channel(priv, channel);
1596err_alloc_ch:
1597 if (cpumask_empty(&priv->dpio_cpumask)) {
1598 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
Ioana Radulescu5206d8d2017-06-06 10:00:33 -05001599 return err;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001600 }
1601
1602 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
1603 cpumask_pr_args(&priv->dpio_cpumask));
1604
1605 return 0;
1606}
1607
1608static void free_dpio(struct dpaa2_eth_priv *priv)
1609{
1610 int i;
1611 struct dpaa2_eth_channel *ch;
1612
1613 /* deregister CDAN notifications and free channels */
1614 for (i = 0; i < priv->num_channels; i++) {
1615 ch = priv->channel[i];
Ioana Radulescu7ec05962018-01-05 05:04:32 -06001616 dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001617 free_channel(priv, ch);
1618 }
1619}
1620
1621static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
1622 int cpu)
1623{
1624 struct device *dev = priv->net_dev->dev.parent;
1625 int i;
1626
1627 for (i = 0; i < priv->num_channels; i++)
1628 if (priv->channel[i]->nctx.desired_cpu == cpu)
1629 return priv->channel[i];
1630
1631 /* We should never get here. Issue a warning and return
1632 * the first channel, because it's still better than nothing
1633 */
1634 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
1635
1636 return priv->channel[0];
1637}
1638
1639static void set_fq_affinity(struct dpaa2_eth_priv *priv)
1640{
1641 struct device *dev = priv->net_dev->dev.parent;
Ioana Radulescu93ddf0b2017-12-21 06:33:21 -06001642 struct cpumask xps_mask;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001643 struct dpaa2_eth_fq *fq;
1644 int rx_cpu, txc_cpu;
Ioana Radulescu93ddf0b2017-12-21 06:33:21 -06001645 int i, err;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001646
1647 /* For each FQ, pick one channel/CPU to deliver frames to.
1648 * This may well change at runtime, either through irqbalance or
1649 * through direct user intervention.
1650 */
1651 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
1652
1653 for (i = 0; i < priv->num_fqs; i++) {
1654 fq = &priv->fq[i];
1655 switch (fq->type) {
1656 case DPAA2_RX_FQ:
1657 fq->target_cpu = rx_cpu;
1658 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
1659 if (rx_cpu >= nr_cpu_ids)
1660 rx_cpu = cpumask_first(&priv->dpio_cpumask);
1661 break;
1662 case DPAA2_TX_CONF_FQ:
1663 fq->target_cpu = txc_cpu;
Ioana Radulescu93ddf0b2017-12-21 06:33:21 -06001664
1665 /* Tell the stack to affine to txc_cpu the Tx queue
1666 * associated with the confirmation one
1667 */
1668 cpumask_clear(&xps_mask);
1669 cpumask_set_cpu(txc_cpu, &xps_mask);
1670 err = netif_set_xps_queue(priv->net_dev, &xps_mask,
1671 fq->flowid);
1672 if (err)
1673 dev_err(dev, "Error setting XPS queue\n");
1674
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001675 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
1676 if (txc_cpu >= nr_cpu_ids)
1677 txc_cpu = cpumask_first(&priv->dpio_cpumask);
1678 break;
1679 default:
1680 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
1681 }
1682 fq->channel = get_affine_channel(priv, fq->target_cpu);
1683 }
1684}
1685
1686static void setup_fqs(struct dpaa2_eth_priv *priv)
1687{
1688 int i;
1689
1690 /* We have one TxConf FQ per Tx flow.
1691 * The number of Tx and Rx queues is the same.
1692 * Tx queues come first in the fq array.
1693 */
1694 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1695 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
1696 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
1697 priv->fq[priv->num_fqs++].flowid = (u16)i;
1698 }
1699
1700 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1701 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
1702 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
1703 priv->fq[priv->num_fqs++].flowid = (u16)i;
1704 }
1705
1706 /* For each FQ, decide on which core to process incoming frames */
1707 set_fq_affinity(priv);
1708}
1709
1710/* Allocate and configure one buffer pool for each interface */
1711static int setup_dpbp(struct dpaa2_eth_priv *priv)
1712{
1713 int err;
1714 struct fsl_mc_device *dpbp_dev;
1715 struct device *dev = priv->net_dev->dev.parent;
Ioana Radulescu05fa39c2017-06-06 10:00:37 -05001716 struct dpbp_attr dpbp_attrs;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001717
1718 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
1719 &dpbp_dev);
1720 if (err) {
1721 dev_err(dev, "DPBP device allocation failed\n");
1722 return err;
1723 }
1724
1725 priv->dpbp_dev = dpbp_dev;
1726
1727 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
1728 &dpbp_dev->mc_handle);
1729 if (err) {
1730 dev_err(dev, "dpbp_open() failed\n");
1731 goto err_open;
1732 }
1733
Ioana Radulescud00defe2017-06-06 10:00:32 -05001734 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
1735 if (err) {
1736 dev_err(dev, "dpbp_reset() failed\n");
1737 goto err_reset;
1738 }
1739
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001740 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
1741 if (err) {
1742 dev_err(dev, "dpbp_enable() failed\n");
1743 goto err_enable;
1744 }
1745
1746 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
Ioana Radulescu05fa39c2017-06-06 10:00:37 -05001747 &dpbp_attrs);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001748 if (err) {
1749 dev_err(dev, "dpbp_get_attributes() failed\n");
1750 goto err_get_attr;
1751 }
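	/* Cache the buffer pool id for later use on the buffer seeding and
	 * draining paths
	 */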
Ioana Radulescu05fa39c2017-06-06 10:00:37 -05001752 priv->bpid = dpbp_attrs.bpid;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001753
1754 return 0;
1755
1756err_get_attr:
1757 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
1758err_enable:
Ioana Radulescud00defe2017-06-06 10:00:32 -05001759err_reset:
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001760 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
1761err_open:
1762 fsl_mc_object_free(dpbp_dev);
1763
1764 return err;
1765}
1766
1767static void free_dpbp(struct dpaa2_eth_priv *priv)
1768{
1769 drain_pool(priv);
1770 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1771 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1772 fsl_mc_object_free(priv->dpbp_dev);
1773}
1774
Ioana Radulescu308f64e2017-10-29 08:20:40 +00001775static int set_buffer_layout(struct dpaa2_eth_priv *priv)
1776{
1777 struct device *dev = priv->net_dev->dev.parent;
1778 struct dpni_buffer_layout buf_layout = {0};
1779 int err;
1780
	/* WRIOP rev1 should report version 1.0.0, but depending on the MC
	 * version the number is not always provided correctly, so accept
	 * both 0.0.0 and 1.0.0 as rev1 here.
	 */
1785 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
1786 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
1787 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
1788 else
1789 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
1790
Bogdan Purcareata4b2d9fe2017-10-29 08:20:43 +00001791 /* tx buffer */
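	/* The Tx frame headroom holds the driver's software annotation
	 * (private data) and, when requested, a hardware timestamp
	 */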
Ioana Radulescu308f64e2017-10-29 08:20:40 +00001792 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
Ioana Radulescu859f9982018-04-26 18:23:47 +08001793 buf_layout.pass_timestamp = true;
1794 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
1795 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
Ioana Radulescu308f64e2017-10-29 08:20:40 +00001796 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1797 DPNI_QUEUE_TX, &buf_layout);
1798 if (err) {
1799 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
1800 return err;
1801 }
1802
1803 /* tx-confirm buffer */
Ioana Radulescu859f9982018-04-26 18:23:47 +08001804 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
Ioana Radulescu308f64e2017-10-29 08:20:40 +00001805 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1806 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
1807 if (err) {
1808 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
1809 return err;
1810 }
1811
Bogdan Purcareata4b2d9fe2017-10-29 08:20:43 +00001812 /* Now that we've set our tx buffer layout, retrieve the minimum
1813 * required tx data offset.
1814 */
1815 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
1816 &priv->tx_data_offset);
1817 if (err) {
1818 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
1819 return err;
1820 }
1821
1822 if ((priv->tx_data_offset % 64) != 0)
1823 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
1824 priv->tx_data_offset);
1825
1826 /* rx buffer */
Ioana Radulescu2b7c86e2017-12-08 06:47:56 -06001827 buf_layout.pass_frame_status = true;
Bogdan Purcareata4b2d9fe2017-10-29 08:20:43 +00001828 buf_layout.pass_parser_result = true;
1829 buf_layout.data_align = priv->rx_buf_align;
1830 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
1831 buf_layout.private_data_size = 0;
1832 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
1833 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1834 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
Ioana Radulescu859f9982018-04-26 18:23:47 +08001835 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
1836 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
Bogdan Purcareata4b2d9fe2017-10-29 08:20:43 +00001837 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1838 DPNI_QUEUE_RX, &buf_layout);
1839 if (err) {
1840 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
1841 return err;
1842 }
1843
Ioana Radulescu308f64e2017-10-29 08:20:40 +00001844 return 0;
1845}
1846
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001847/* Configure the DPNI object this interface is associated with */
1848static int setup_dpni(struct fsl_mc_device *ls_dev)
1849{
1850 struct device *dev = &ls_dev->dev;
1851 struct dpaa2_eth_priv *priv;
1852 struct net_device *net_dev;
1853 int err;
1854
1855 net_dev = dev_get_drvdata(dev);
1856 priv = netdev_priv(net_dev);
1857
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001858 /* get a handle for the DPNI object */
Ioana Radulescu50eacbc2017-06-06 10:00:36 -05001859 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001860 if (err) {
1861 dev_err(dev, "dpni_open() failed\n");
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001862 return err;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001863 }
1864
Ioana Radulescu311cffa2018-03-23 08:44:09 -05001865 /* Check if we can work with this DPNI object */
1866 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
1867 &priv->dpni_ver_minor);
1868 if (err) {
1869 dev_err(dev, "dpni_get_api_version() failed\n");
1870 goto close;
1871 }
1872 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
1873 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
1874 priv->dpni_ver_major, priv->dpni_ver_minor,
1875 DPNI_VER_MAJOR, DPNI_VER_MINOR);
1876 err = -ENOTSUPP;
1877 goto close;
1878 }
1879
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001880 ls_dev->mc_io = priv->mc_io;
1881 ls_dev->mc_handle = priv->mc_token;
1882
1883 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1884 if (err) {
1885 dev_err(dev, "dpni_reset() failed\n");
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001886 goto close;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001887 }
1888
1889 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
1890 &priv->dpni_attrs);
1891 if (err) {
1892 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001893 goto close;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001894 }
1895
Ioana Radulescu308f64e2017-10-29 08:20:40 +00001896 err = set_buffer_layout(priv);
1897 if (err)
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001898 goto close;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001899
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001900 return 0;
1901
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001902close:
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001903 dpni_close(priv->mc_io, 0, priv->mc_token);
Ioana Radulescuf6dda802017-10-29 08:20:39 +00001904
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001905 return err;
1906}
1907
1908static void free_dpni(struct dpaa2_eth_priv *priv)
1909{
1910 int err;
1911
1912 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1913 if (err)
1914 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
1915 err);
1916
1917 dpni_close(priv->mc_io, 0, priv->mc_token);
1918}
1919
1920static int setup_rx_flow(struct dpaa2_eth_priv *priv,
1921 struct dpaa2_eth_fq *fq)
1922{
1923 struct device *dev = priv->net_dev->dev.parent;
1924 struct dpni_queue queue;
1925 struct dpni_queue_id qid;
1926 struct dpni_taildrop td;
1927 int err;
1928
1929 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1930 DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
1931 if (err) {
1932 dev_err(dev, "dpni_get_queue(RX) failed\n");
1933 return err;
1934 }
1935
1936 fq->fqid = qid.fqid;
1937
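	/* Have frames from this Rx queue delivered to the DPCON channel
	 * affine to the core that will process them
	 */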
1938 queue.destination.id = fq->channel->dpcon_id;
1939 queue.destination.type = DPNI_DEST_DPCON;
1940 queue.destination.priority = 1;
Ioana Radulescu75c583a2018-02-26 10:28:06 -06001941 queue.user_context = (u64)(uintptr_t)fq;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001942 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1943 DPNI_QUEUE_RX, 0, fq->flowid,
1944 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1945 &queue);
1946 if (err) {
1947 dev_err(dev, "dpni_set_queue(RX) failed\n");
1948 return err;
1949 }
1950
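	/* Enable taildrop on this queue to bound the amount of buffered
	 * Rx traffic
	 */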
1951 td.enable = 1;
1952 td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1953 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
1954 DPNI_QUEUE_RX, 0, fq->flowid, &td);
1955 if (err) {
		dev_err(dev, "dpni_set_taildrop() failed\n");
1957 return err;
1958 }
1959
1960 return 0;
1961}
1962
1963static int setup_tx_flow(struct dpaa2_eth_priv *priv,
1964 struct dpaa2_eth_fq *fq)
1965{
1966 struct device *dev = priv->net_dev->dev.parent;
1967 struct dpni_queue queue;
1968 struct dpni_queue_id qid;
1969 int err;
1970
1971 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1972 DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
1973 if (err) {
1974 dev_err(dev, "dpni_get_queue(TX) failed\n");
1975 return err;
1976 }
1977
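	/* On Tx we only need the queuing destination bin (qdbin), which is
	 * used together with the DPNI's qdid on the frame enqueue path
	 */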
1978 fq->tx_qdbin = qid.qdbin;
1979
1980 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1981 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1982 &queue, &qid);
1983 if (err) {
1984 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
1985 return err;
1986 }
1987
1988 fq->fqid = qid.fqid;
1989
1990 queue.destination.id = fq->channel->dpcon_id;
1991 queue.destination.type = DPNI_DEST_DPCON;
1992 queue.destination.priority = 0;
Ioana Radulescu75c583a2018-02-26 10:28:06 -06001993 queue.user_context = (u64)(uintptr_t)fq;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001994 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1995 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1996 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1997 &queue);
1998 if (err) {
1999 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
2000 return err;
2001 }
2002
2003 return 0;
2004}
2005
Ioana Ciocoi Radulescuedad8d22018-09-24 15:36:21 +00002006/* Supported header fields for Rx hash distribution key */
Ioana Radulescuf76c4832018-10-01 13:44:56 +03002007static const struct dpaa2_eth_dist_fields dist_fields[] = {
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002008 {
Ioana Ciocoi Radulescuedad8d22018-09-24 15:36:21 +00002009 /* L2 header */
2010 .rxnfc_field = RXH_L2DA,
2011 .cls_prot = NET_PROT_ETH,
2012 .cls_field = NH_FLD_ETH_DA,
2013 .size = 6,
2014 }, {
2015 /* VLAN header */
2016 .rxnfc_field = RXH_VLAN,
2017 .cls_prot = NET_PROT_VLAN,
2018 .cls_field = NH_FLD_VLAN_TCI,
2019 .size = 2,
2020 }, {
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002021 /* IP header */
2022 .rxnfc_field = RXH_IP_SRC,
2023 .cls_prot = NET_PROT_IP,
2024 .cls_field = NH_FLD_IP_SRC,
2025 .size = 4,
2026 }, {
2027 .rxnfc_field = RXH_IP_DST,
2028 .cls_prot = NET_PROT_IP,
2029 .cls_field = NH_FLD_IP_DST,
2030 .size = 4,
2031 }, {
2032 .rxnfc_field = RXH_L3_PROTO,
2033 .cls_prot = NET_PROT_IP,
2034 .cls_field = NH_FLD_IP_PROTO,
2035 .size = 1,
2036 }, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from the L4 header.
2039 */
2040 .rxnfc_field = RXH_L4_B_0_1,
2041 .cls_prot = NET_PROT_UDP,
2042 .cls_field = NH_FLD_UDP_PORT_SRC,
2043 .size = 2,
2044 }, {
2045 .rxnfc_field = RXH_L4_B_2_3,
2046 .cls_prot = NET_PROT_UDP,
2047 .cls_field = NH_FLD_UDP_PORT_DST,
2048 .size = 2,
2049 },
2050};
2051
Ioana Radulescudf85aeb2018-10-01 13:44:55 +03002052/* Configure the Rx hash key using the legacy API */
2053static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2054{
2055 struct device *dev = priv->net_dev->dev.parent;
2056 struct dpni_rx_tc_dist_cfg dist_cfg;
2057 int err;
2058
2059 memset(&dist_cfg, 0, sizeof(dist_cfg));
2060
2061 dist_cfg.key_cfg_iova = key;
2062 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2063 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2064
2065 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2066 if (err)
2067 dev_err(dev, "dpni_set_rx_tc_dist failed\n");
2068
2069 return err;
2070}
2071
2072/* Configure the Rx hash key using the new API */
2073static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2074{
2075 struct device *dev = priv->net_dev->dev.parent;
2076 struct dpni_rx_dist_cfg dist_cfg;
2077 int err;
2078
2079 memset(&dist_cfg, 0, sizeof(dist_cfg));
2080
2081 dist_cfg.key_cfg_iova = key;
2082 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2083 dist_cfg.enable = 1;
2084
2085 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2086 if (err)
2087 dev_err(dev, "dpni_set_rx_hash_dist failed\n");
2088
2089 return err;
2090}
2091
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002092/* Set RX hash options
2093 * flags is a combination of RXH_ bits
2094 */
Ioana Ciocoi Radulescuedad8d22018-09-24 15:36:21 +00002095int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002096{
2097 struct device *dev = net_dev->dev.parent;
2098 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2099 struct dpkg_profile_cfg cls_cfg;
Ioana Ciocoi Radulescuedad8d22018-09-24 15:36:21 +00002100 u32 rx_hash_fields = 0;
Ioana Radulescudf85aeb2018-10-01 13:44:55 +03002101 dma_addr_t key_iova;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002102 u8 *dma_mem;
2103 int i;
2104 int err = 0;
2105
2106 if (!dpaa2_eth_hash_enabled(priv)) {
Ioana Radulescue202c822017-06-06 10:00:27 -05002107 dev_dbg(dev, "Hashing support is not enabled\n");
Ioana Ciocoi Radulescuedad8d22018-09-24 15:36:21 +00002108 return -EOPNOTSUPP;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002109 }
2110
2111 memset(&cls_cfg, 0, sizeof(cls_cfg));
2112
Ioana Radulescuf76c4832018-10-01 13:44:56 +03002113 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002114 struct dpkg_extract *key =
2115 &cls_cfg.extracts[cls_cfg.num_extracts];
2116
Ioana Radulescuf76c4832018-10-01 13:44:56 +03002117 if (!(flags & dist_fields[i].rxnfc_field))
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002118 continue;
2119
2120 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
2121 dev_err(dev, "error adding key extraction rule, too many rules?\n");
2122 return -E2BIG;
2123 }
2124
2125 key->type = DPKG_EXTRACT_FROM_HDR;
Ioana Radulescuf76c4832018-10-01 13:44:56 +03002126 key->extract.from_hdr.prot = dist_fields[i].cls_prot;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002127 key->extract.from_hdr.type = DPKG_FULL_FIELD;
Ioana Radulescuf76c4832018-10-01 13:44:56 +03002128 key->extract.from_hdr.field = dist_fields[i].cls_field;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002129 cls_cfg.num_extracts++;
Ioana Radulescu34196742017-04-28 04:50:30 -05002130
Ioana Radulescuf76c4832018-10-01 13:44:56 +03002131 rx_hash_fields |= dist_fields[i].rxnfc_field;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002132 }
2133
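	/* The key composition rule is passed to the MC by address, so it
	 * must be prepared in a DMA-able memory area
	 */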
Ioana Radulescue40ef9e2017-06-06 10:00:30 -05002134 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002135 if (!dma_mem)
2136 return -ENOMEM;
2137
2138 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
2139 if (err) {
Ioana Radulescu77160af2017-06-06 10:00:28 -05002140 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
Ioana Radulescudf85aeb2018-10-01 13:44:55 +03002141 goto free_key;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002142 }
2143
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002144 /* Prepare for setting the rx dist */
Ioana Radulescudf85aeb2018-10-01 13:44:55 +03002145 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
2146 DMA_TO_DEVICE);
2147 if (dma_mapping_error(dev, key_iova)) {
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002148 dev_err(dev, "DMA mapping failed\n");
2149 err = -ENOMEM;
Ioana Radulescudf85aeb2018-10-01 13:44:55 +03002150 goto free_key;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002151 }
2152
Ioana Radulescudf85aeb2018-10-01 13:44:55 +03002153 if (dpaa2_eth_has_legacy_dist(priv))
2154 err = config_legacy_hash_key(priv, key_iova);
Ioana Ciocoi Radulescuedad8d22018-09-24 15:36:21 +00002155 else
Ioana Radulescudf85aeb2018-10-01 13:44:55 +03002156 err = config_hash_key(priv, key_iova);
2157
2158 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
2159 DMA_TO_DEVICE);
2160 if (!err)
Ioana Ciocoi Radulescuedad8d22018-09-24 15:36:21 +00002161 priv->rx_hash_fields = rx_hash_fields;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002162
Ioana Radulescudf85aeb2018-10-01 13:44:55 +03002163free_key:
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002164 kfree(dma_mem);
2165 return err;
2166}
2167
2168/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
2169 * frame queues and channels
2170 */
2171static int bind_dpni(struct dpaa2_eth_priv *priv)
2172{
2173 struct net_device *net_dev = priv->net_dev;
2174 struct device *dev = net_dev->dev.parent;
2175 struct dpni_pools_cfg pools_params;
2176 struct dpni_error_cfg err_cfg;
2177 int err = 0;
2178 int i;
2179
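	/* Tell the DPNI which buffer pool to draw Rx buffers from, and what
	 * buffer size to expect
	 */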
2180 pools_params.num_dpbp = 1;
2181 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
2182 pools_params.pools[0].backup_pool = 0;
2183 pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
2184 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
2185 if (err) {
2186 dev_err(dev, "dpni_set_pools() failed\n");
2187 return err;
2188 }
2189
Ioana Radulescu227686b2018-07-27 09:12:59 -05002190 /* have the interface implicitly distribute traffic based on
2191 * the default hash key
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002192 */
Ioana Radulescu227686b2018-07-27 09:12:59 -05002193 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
Ioana Ciocoi Radulescuedad8d22018-09-24 15:36:21 +00002194 if (err && err != -EOPNOTSUPP)
Ioana Radulescu0f4c2952017-10-11 08:29:50 -05002195 dev_err(dev, "Failed to configure hashing\n");
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002196
2197 /* Configure handling of error frames */
Ioana Radulescu39163c02017-06-06 10:00:39 -05002198 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002199 err_cfg.set_frame_annotation = 1;
2200 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
2201 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
2202 &err_cfg);
2203 if (err) {
2204 dev_err(dev, "dpni_set_errors_behavior failed\n");
2205 return err;
2206 }
2207
2208 /* Configure Rx and Tx conf queues to generate CDANs */
2209 for (i = 0; i < priv->num_fqs; i++) {
2210 switch (priv->fq[i].type) {
2211 case DPAA2_RX_FQ:
2212 err = setup_rx_flow(priv, &priv->fq[i]);
2213 break;
2214 case DPAA2_TX_CONF_FQ:
2215 err = setup_tx_flow(priv, &priv->fq[i]);
2216 break;
2217 default:
2218 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
2219 return -EINVAL;
2220 }
2221 if (err)
2222 return err;
2223 }
2224
2225 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
2226 DPNI_QUEUE_TX, &priv->tx_qdid);
2227 if (err) {
2228 dev_err(dev, "dpni_get_qdid() failed\n");
2229 return err;
2230 }
2231
2232 return 0;
2233}
2234
2235/* Allocate rings for storing incoming frame descriptors */
2236static int alloc_rings(struct dpaa2_eth_priv *priv)
2237{
2238 struct net_device *net_dev = priv->net_dev;
2239 struct device *dev = net_dev->dev.parent;
2240 int i;
2241
2242 for (i = 0; i < priv->num_channels; i++) {
2243 priv->channel[i]->store =
2244 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
2245 if (!priv->channel[i]->store) {
2246 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
2247 goto err_ring;
2248 }
2249 }
2250
2251 return 0;
2252
2253err_ring:
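	/* Only destroy the stores that were successfully created */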
2254 for (i = 0; i < priv->num_channels; i++) {
2255 if (!priv->channel[i]->store)
2256 break;
2257 dpaa2_io_store_destroy(priv->channel[i]->store);
2258 }
2259
2260 return -ENOMEM;
2261}
2262
2263static void free_rings(struct dpaa2_eth_priv *priv)
2264{
2265 int i;
2266
2267 for (i = 0; i < priv->num_channels; i++)
2268 dpaa2_io_store_destroy(priv->channel[i]->store);
2269}
2270
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002271static int set_mac_addr(struct dpaa2_eth_priv *priv)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002272{
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002273 struct net_device *net_dev = priv->net_dev;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002274 struct device *dev = net_dev->dev.parent;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002275 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002276 int err;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002277
2278 /* Get firmware address, if any */
2279 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
2280 if (err) {
2281 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
2282 return err;
2283 }
2284
	/* Get the MAC address currently configured on the DPNI, if any */
2286 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2287 dpni_mac_addr);
2288 if (err) {
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002289 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002290 return err;
2291 }
2292
2293 /* First check if firmware has any address configured by bootloader */
2294 if (!is_zero_ether_addr(mac_addr)) {
2295 /* If the DPMAC addr != DPNI addr, update it */
2296 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
2297 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
2298 priv->mc_token,
2299 mac_addr);
2300 if (err) {
2301 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2302 return err;
2303 }
2304 }
2305 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
2306 } else if (is_zero_ether_addr(dpni_mac_addr)) {
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002307 /* No MAC address configured, fill in net_dev->dev_addr
2308 * with a random one
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002309 */
2310 eth_hw_addr_random(net_dev);
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002311 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
2312
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002313 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2314 net_dev->dev_addr);
2315 if (err) {
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002316 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002317 return err;
2318 }
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002319
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002320 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
2321 * practical purposes, this will be our "permanent" mac address,
2322 * at least until the next reboot. This move will also permit
2323 * register_netdevice() to properly fill up net_dev->perm_addr.
2324 */
2325 net_dev->addr_assign_type = NET_ADDR_PERM;
2326 } else {
2327 /* NET_ADDR_PERM is default, all we have to do is
2328 * fill in the device addr.
2329 */
2330 memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
2331 }
2332
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002333 return 0;
2334}
2335
2336static int netdev_init(struct net_device *net_dev)
2337{
2338 struct device *dev = net_dev->dev.parent;
2339 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
Ioana Radulescu7f12c8a32018-08-29 04:42:39 -05002340 u32 options = priv->dpni_attrs.options;
2341 u64 supported = 0, not_supported = 0;
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002342 u8 bcast_addr[ETH_ALEN];
Ioana Radulescubb5b42c2017-06-06 10:00:41 -05002343 u8 num_queues;
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002344 int err;
2345
2346 net_dev->netdev_ops = &dpaa2_eth_ops;
Ioana Radulescu7f12c8a32018-08-29 04:42:39 -05002347 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002348
2349 err = set_mac_addr(priv);
2350 if (err)
2351 return err;
2352
2353 /* Explicitly add the broadcast address to the MAC filtering table */
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002354 eth_broadcast_addr(bcast_addr);
2355 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
2356 if (err) {
Ioana Radulescu6ab00862017-06-06 10:00:40 -05002357 dev_err(dev, "dpni_add_mac_addr() failed\n");
2358 return err;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002359 }
2360
Ioana Radulescu3ccc8d42018-07-09 10:01:10 -05002361 /* Set MTU upper limit; lower limit is 68B (default value) */
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002362 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
Ioana Radulescu00fee002018-07-09 10:01:11 -05002363 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
Ioana Radulescu81f34e92018-07-12 12:12:29 -05002364 DPAA2_ETH_MFL);
Ioana Radulescu00fee002018-07-09 10:01:11 -05002365 if (err) {
2366 dev_err(dev, "dpni_set_max_frame_length() failed\n");
2367 return err;
2368 }
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002369
Ioana Radulescubb5b42c2017-06-06 10:00:41 -05002370 /* Set actual number of queues in the net device */
2371 num_queues = dpaa2_eth_queue_count(priv);
2372 err = netif_set_real_num_tx_queues(net_dev, num_queues);
2373 if (err) {
2374 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
2375 return err;
2376 }
2377 err = netif_set_real_num_rx_queues(net_dev, num_queues);
2378 if (err) {
2379 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
2380 return err;
2381 }
2382
Ioana Radulescu7f12c8a32018-08-29 04:42:39 -05002383 /* Capabilities listing */
2384 supported |= IFF_LIVE_ADDR_CHANGE;
2385
2386 if (options & DPNI_OPT_NO_MAC_FILTER)
2387 not_supported |= IFF_UNICAST_FLT;
2388 else
2389 supported |= IFF_UNICAST_FLT;
2390
2391 net_dev->priv_flags |= supported;
2392 net_dev->priv_flags &= ~not_supported;
2393
2394 /* Features */
2395 net_dev->features = NETIF_F_RXCSUM |
2396 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2397 NETIF_F_SG | NETIF_F_HIGHDMA |
2398 NETIF_F_LLTX;
2399 net_dev->hw_features = net_dev->features;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002400
2401 return 0;
2402}
2403
2404static int poll_link_state(void *arg)
2405{
2406 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
2407 int err;
2408
2409 while (!kthread_should_stop()) {
2410 err = link_state_update(priv);
2411 if (unlikely(err))
2412 return err;
2413
2414 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
2415 }
2416
2417 return 0;
2418}
2419
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002420static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
2421{
Ioana Radulescu112197d2017-10-11 08:29:49 -05002422 u32 status = ~0;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002423 struct device *dev = (struct device *)arg;
2424 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
2425 struct net_device *net_dev = dev_get_drvdata(dev);
2426 int err;
2427
2428 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
2429 DPNI_IRQ_INDEX, &status);
2430 if (unlikely(err)) {
Ioana Radulescu77160af2017-06-06 10:00:28 -05002431 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
Ioana Radulescu112197d2017-10-11 08:29:49 -05002432 return IRQ_HANDLED;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002433 }
2434
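	/* Link state change is the only event we expect here, since it is
	 * the only one enabled in the interrupt mask
	 */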
Ioana Radulescu112197d2017-10-11 08:29:49 -05002435 if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002436 link_state_update(netdev_priv(net_dev));
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002437
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002438 return IRQ_HANDLED;
2439}
2440
2441static int setup_irqs(struct fsl_mc_device *ls_dev)
2442{
2443 int err = 0;
2444 struct fsl_mc_device_irq *irq;
2445
2446 err = fsl_mc_allocate_irqs(ls_dev);
2447 if (err) {
2448 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
2449 return err;
2450 }
2451
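	/* The DPNI exposes a single interrupt line, delivered as an MC bus
	 * MSI; use a threaded handler since we issue MC commands from it
	 */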
2452 irq = ls_dev->irqs[0];
2453 err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
Ioana Radulescufdc9b532018-03-23 08:44:05 -05002454 NULL, dpni_irq0_handler_thread,
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002455 IRQF_NO_SUSPEND | IRQF_ONESHOT,
2456 dev_name(&ls_dev->dev), &ls_dev->dev);
2457 if (err < 0) {
Ioana Radulescu77160af2017-06-06 10:00:28 -05002458 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002459 goto free_mc_irq;
2460 }
2461
2462 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
2463 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
2464 if (err < 0) {
Ioana Radulescu77160af2017-06-06 10:00:28 -05002465 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002466 goto free_irq;
2467 }
2468
2469 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
2470 DPNI_IRQ_INDEX, 1);
2471 if (err < 0) {
Ioana Radulescu77160af2017-06-06 10:00:28 -05002472 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002473 goto free_irq;
2474 }
2475
2476 return 0;
2477
2478free_irq:
2479 devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
2480free_mc_irq:
2481 fsl_mc_free_irqs(ls_dev);
2482
2483 return err;
2484}
2485
2486static void add_ch_napi(struct dpaa2_eth_priv *priv)
2487{
2488 int i;
2489 struct dpaa2_eth_channel *ch;
2490
2491 for (i = 0; i < priv->num_channels; i++) {
2492 ch = priv->channel[i];
2493 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
2494 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
2495 NAPI_POLL_WEIGHT);
2496 }
2497}
2498
2499static void del_ch_napi(struct dpaa2_eth_priv *priv)
2500{
2501 int i;
2502 struct dpaa2_eth_channel *ch;
2503
2504 for (i = 0; i < priv->num_channels; i++) {
2505 ch = priv->channel[i];
2506 netif_napi_del(&ch->napi);
2507 }
2508}
2509
2510static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
2511{
2512 struct device *dev;
2513 struct net_device *net_dev = NULL;
2514 struct dpaa2_eth_priv *priv = NULL;
2515 int err = 0;
2516
2517 dev = &dpni_dev->dev;
2518
2519 /* Net device */
2520 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
2521 if (!net_dev) {
2522 dev_err(dev, "alloc_etherdev_mq() failed\n");
2523 return -ENOMEM;
2524 }
2525
2526 SET_NETDEV_DEV(net_dev, dev);
2527 dev_set_drvdata(dev, net_dev);
2528
2529 priv = netdev_priv(net_dev);
2530 priv->net_dev = net_dev;
2531
Ioana Radulescu08eb2392017-05-24 07:13:27 -05002532 priv->iommu_domain = iommu_get_domain_for_dev(dev);
2533
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002534 /* Obtain a MC portal */
2535 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
2536 &priv->mc_io);
2537 if (err) {
Ioana Radulescu8c369612018-03-20 07:04:46 -05002538 if (err == -ENXIO)
2539 err = -EPROBE_DEFER;
2540 else
2541 dev_err(dev, "MC portal allocation failed\n");
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002542 goto err_portal_alloc;
2543 }
2544
2545 /* MC objects initialization and configuration */
2546 err = setup_dpni(dpni_dev);
2547 if (err)
2548 goto err_dpni_setup;
2549
2550 err = setup_dpio(priv);
2551 if (err)
2552 goto err_dpio_setup;
2553
2554 setup_fqs(priv);
2555
2556 err = setup_dpbp(priv);
2557 if (err)
2558 goto err_dpbp_setup;
2559
2560 err = bind_dpni(priv);
2561 if (err)
2562 goto err_bind;
2563
2564 /* Add a NAPI context for each channel */
2565 add_ch_napi(priv);
2566
2567 /* Percpu statistics */
2568 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
2569 if (!priv->percpu_stats) {
2570 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
2571 err = -ENOMEM;
2572 goto err_alloc_percpu_stats;
2573 }
Ioana Radulescu85047ab2017-04-28 04:50:31 -05002574 priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
2575 if (!priv->percpu_extras) {
2576 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
2577 err = -ENOMEM;
2578 goto err_alloc_percpu_extras;
2579 }
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002580
2581 err = netdev_init(net_dev);
2582 if (err)
2583 goto err_netdev_init;
2584
2585 /* Configure checksum offload based on current interface flags */
2586 err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
2587 if (err)
2588 goto err_csum;
2589
2590 err = set_tx_csum(priv, !!(net_dev->features &
2591 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
2592 if (err)
2593 goto err_csum;
2594
2595 err = alloc_rings(priv);
2596 if (err)
2597 goto err_alloc_rings;
2598
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002599 err = setup_irqs(dpni_dev);
2600 if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
2602 priv->poll_thread = kthread_run(poll_link_state, priv,
2603 "%s_poll_link", net_dev->name);
2604 if (IS_ERR(priv->poll_thread)) {
Ioana Radulescu7f12c8a32018-08-29 04:42:39 -05002605 dev_err(dev, "Error starting polling thread\n");
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002606 goto err_poll_thread;
2607 }
2608 priv->do_link_poll = true;
2609 }
2610
Ioana Radulescu7f12c8a32018-08-29 04:42:39 -05002611 err = register_netdev(net_dev);
2612 if (err < 0) {
2613 dev_err(dev, "register_netdev() failed\n");
2614 goto err_netdev_reg;
2615 }
2616
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002617 dev_info(dev, "Probed interface %s\n", net_dev->name);
2618 return 0;
2619
Ioana Radulescu7f12c8a32018-08-29 04:42:39 -05002620err_netdev_reg:
2621 if (priv->do_link_poll)
2622 kthread_stop(priv->poll_thread);
2623 else
2624 fsl_mc_free_irqs(dpni_dev);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002625err_poll_thread:
2626 free_rings(priv);
2627err_alloc_rings:
2628err_csum:
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002629err_netdev_init:
Ioana Radulescu85047ab2017-04-28 04:50:31 -05002630 free_percpu(priv->percpu_extras);
2631err_alloc_percpu_extras:
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002632 free_percpu(priv->percpu_stats);
2633err_alloc_percpu_stats:
2634 del_ch_napi(priv);
2635err_bind:
2636 free_dpbp(priv);
2637err_dpbp_setup:
2638 free_dpio(priv);
2639err_dpio_setup:
2640 free_dpni(priv);
2641err_dpni_setup:
2642 fsl_mc_portal_free(priv->mc_io);
2643err_portal_alloc:
2644 dev_set_drvdata(dev, NULL);
2645 free_netdev(net_dev);
2646
2647 return err;
2648}
2649
2650static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
2651{
2652 struct device *dev;
2653 struct net_device *net_dev;
2654 struct dpaa2_eth_priv *priv;
2655
2656 dev = &ls_dev->dev;
2657 net_dev = dev_get_drvdata(dev);
2658 priv = netdev_priv(net_dev);
2659
2660 unregister_netdev(net_dev);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002661
2662 if (priv->do_link_poll)
2663 kthread_stop(priv->poll_thread);
2664 else
2665 fsl_mc_free_irqs(ls_dev);
2666
2667 free_rings(priv);
2668 free_percpu(priv->percpu_stats);
Ioana Radulescu85047ab2017-04-28 04:50:31 -05002669 free_percpu(priv->percpu_extras);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002670
2671 del_ch_napi(priv);
2672 free_dpbp(priv);
2673 free_dpio(priv);
2674 free_dpni(priv);
2675
2676 fsl_mc_portal_free(priv->mc_io);
2677
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002678 free_netdev(net_dev);
2679
Ioana Radulescu4bc07aa2018-03-23 10:23:36 -05002680 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
Ioana Radulescu7472dd92018-03-23 08:44:06 -05002681
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05002682 return 0;
2683}
2684
2685static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
2686 {
2687 .vendor = FSL_MC_VENDOR_FREESCALE,
2688 .obj_type = "dpni",
2689 },
2690 { .vendor = 0x0 }
2691};
2692MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
2693
2694static struct fsl_mc_driver dpaa2_eth_driver = {
2695 .driver = {
2696 .name = KBUILD_MODNAME,
2697 .owner = THIS_MODULE,
2698 },
2699 .probe = dpaa2_eth_probe,
2700 .remove = dpaa2_eth_remove,
2701 .match_id_table = dpaa2_eth_match_id_table
2702};
2703
2704module_fsl_mc_driver(dpaa2_eth_driver);