// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2017 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/sock.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

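/* Translate an I/O virtual address, as seen by the WRIOP hardware, into a
 * CPU virtual address. When the DPNI sits behind an IOMMU the translation
 * goes through the IOMMU domain; otherwise the IOVA is already a physical
 * address.
 */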
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

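/* Propagate the hardware checksum validation result to the stack. The
 * frame status carries distinct L3 (DPAA2_FAS_L3CV) and L4
 * (DPAA2_FAS_L4CV) checksum-valid bits; the software checksum is only
 * skipped when both are set.
 */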
static void validate_rx_csum(struct dpaa2_eth_priv *priv,
			     u32 fd_status,
			     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void free_rx_fd(struct dpaa2_eth_priv *priv,
		       const struct dpaa2_fd *fd,
		       void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
				 DMA_BIDIRECTIONAL);

		skb_free_frag(sg_vaddr);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	skb_free_frag(vaddr);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
					const struct dpaa2_fd *fd,
					void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}

/* Build a non-linear (fragmented) skb based on a S/G table */
static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
				 DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				skb_free_frag(sg_vaddr);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (!dpaa2_sg_is_final(&sgt[i]) &&
				       i < DPAA2_ETH_MAX_SG_ENTRIES)
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				      (page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, DPAA2_ETH_RX_BUF_SIZE);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
				 DMA_BIDIRECTIONAL);
		skb_free_frag(vaddr);
	}
}

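/* Recycle a buffer whose frame was dropped by the XDP program. Releases
 * to the hardware buffer pool are only efficient in batches, so dropped
 * buffers are accumulated per channel and released DPAA2_ETH_BUFS_PER_CMD
 * at a time; if the release ultimately fails, the buffers are freed back
 * to the kernel allocator instead.
 */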
static void xdp_release_buf(struct dpaa2_eth_priv *priv,
			    struct dpaa2_eth_channel *ch,
			    dma_addr_t addr)
{
	int err;

	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->xdp.drop_bufs,
					       ch->xdp.drop_cnt)) == -EBUSY)
		cpu_relax();

	if (err) {
		free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
		ch->buf_count -= ch->xdp.drop_cnt;
	}

	ch->xdp.drop_cnt = 0;
}

static u32 run_xdp(struct dpaa2_eth_priv *priv,
		   struct dpaa2_eth_channel *ch,
		   struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;

	rcu_read_lock();

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
	xdp_set_data_meta_invalid(&xdp);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(xdp_act);
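		/* fall through */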
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
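		/* fall through */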
	case XDP_DROP:
		xdp_release_buf(priv, ch, addr);
		break;
	}

out:
	rcu_read_unlock();
	return xdp_act;
}

/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
				DMA_BIDIRECTIONAL);

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = run_xdp(priv, ch, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
				 DMA_BIDIRECTIONAL);
		skb = build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
				 DMA_BIDIRECTIONAL);
		skb = build_frag_skb(priv, ch, buf_data);
		skb_free_frag(vaddr);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	napi_gro_receive(&ch->napi, skb);

	return;

err_build_skb:
	free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int consume_frames(struct dpaa2_eth_channel *ch,
			  struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;
	ch->stats.frames += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}

/* Configure the egress frame annotation for timestamp update */
static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
{
	struct dpaa2_faead *faead;
	u32 ctrl, frc;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* enable UPD (update prepended data) bit in FAEAD field of
	 * hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);
}

/* Create a frame descriptor based on a fragmented skb */
static int build_sg_fd(struct dpaa2_eth_priv *priv,
		       struct sk_buff *skb,
		       struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 * - offset is 0
	 * - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->skb = skb;
	swa->scl = scl;
	swa->num_sg = num_sg;
	swa->sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, sgt_buf);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

/* Create a frame descriptor based on a linear skb */
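/* Tx buffer layout for a linear skb, as set up below (sketch; field
 * widths not to scale):
 *
 *  buffer_start                            skb->data    skb_tail_pointer()
 *       |                                      |                 |
 *       v                                      v                 v
 *       +-----------------+--------------------+-----------------+
 *       | skb backpointer | HW annotations and |   frame data    |
 *       | (SW annotation) | remaining headroom |                 |
 *       +-----------------+--------------------+-----------------+
 *
 * The whole [buffer_start, skb_tail_pointer()) range is DMA-mapped and
 * the FD offset field records skb->data - buffer_start.
 */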
static int build_single_fd(struct dpaa2_eth_priv *priv,
			   struct sk_buff *skb,
			   struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct sk_buff **skbh;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	skbh = (struct sk_buff **)buffer_start;
	*skbh = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, buffer_start);

	return 0;
}

/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
		       const struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr;
	struct sk_buff **skbh, *skb;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);

	if (fd_format == dpaa2_fd_single) {
		skb = *skbh;
		buffer_start = (unsigned char *)skbh;
		/* Accessing the skb buffer is safe before dma unmap, because
		 * we didn't map the actual skb shell.
		 */
		dma_unmap_single(dev, fd_addr,
				 skb_tail_pointer(skb) - buffer_start,
				 DMA_BIDIRECTIONAL);
	} else if (fd_format == dpaa2_fd_sg) {
		swa = (struct dpaa2_eth_swa *)skbh;
		skb = swa->skb;

		/* Unmap the scatterlist */
		dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL);
		kfree(swa->scl);

		/* Unmap the SGT buffer */
		dma_unmap_single(dev, fd_addr, swa->sgt_size,
				 DMA_BIDIRECTIONAL);
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	/* Get the timestamp value */
	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(skbh, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single)
		skb_free_frag(skbh);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}

static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	int err, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
	if (skb_headroom(skb) < needed_headroom) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, needed_headroom);
		if (unlikely(!ns)) {
			percpu_stats->tx_dropped++;
			goto err_alloc_headroom;
		}
		percpu_extras->tx_reallocs++;

		if (skb->sk)
			skb_set_owner_w(ns, skb->sk);

		dev_kfree_skb(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = build_sg_fd(priv, skb, &fd);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else {
		err = build_single_fd(priv, skb, &fd);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);
	fq = &priv->fq[queue_mapping];
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
						  priv->tx_qdid, 0,
						  fq->tx_qdbin, &fd);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		free_tx_fd(priv, &fd);
	} else {
		fd_len = dpaa2_fd_get_len(&fd);
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += fd_len;

		nq = netdev_get_tx_queue(net_dev, queue_mapping);
		netdev_tx_sent_queue(nq, fd_len);
	}

	return NETDEV_TX_OK;

err_build_fd:
err_alloc_headroom:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch __always_unused,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;

	fq->dq_frames++;
	fq->dq_bytes += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	free_tx_fd(priv, fd);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}

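/* Enable/disable Rx checksum validation offload in hardware. This takes
 * one firmware (MC) command per checksum layer, L3 and L4.
 */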
static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

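/* Enable/disable Tx checksum generation offload in hardware; as on the
 * Rx side, the L3 and L4 layers are configured separately.
 */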
static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int add_bufs(struct dpaa2_eth_priv *priv,
		    struct dpaa2_eth_channel *ch, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	void *buf;
	dma_addr_t addr;
	int i, err;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate buffer visible to WRIOP + skb shared info +
		 * alignment padding
		 */
		buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
		if (unlikely(!buf))
			goto err_alloc;

		buf = PTR_ALIGN(buf, priv->rx_buf_align);

		addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
				      DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 buf, dpaa2_eth_buf_raw_size(priv),
					 addr, DPAA2_ETH_RX_BUF_SIZE,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, i)) == -EBUSY)
		cpu_relax();

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		free_bufs(priv, buf_array, i);
		return 0;
	}

	return i;

err_map:
	skb_free_frag(buf);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	/* This is the lazy seeding of Rx buffer pools.
	 * add_bufs() is also used on the Rx hotpath and calls
	 * napi_alloc_frag(). The trouble with that is that it in turn ends up
	 * calling this_cpu_ptr(), which mandates execution in atomic context.
	 * Rather than splitting up the code, do a one-off preempt disable.
	 */
	preempt_disable();
	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = add_bufs(priv, priv->channel[j], bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
				preempt_enable();
				return -ENOMEM;
			}
		}
	}
	preempt_enable();

	return 0;
}

/* Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
					       buf_array, count);
		if (ret < 0) {
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		free_bufs(priv, buf_array, ret);
	} while (ret);
}

static void drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int refill_pool(struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_channel *ch,
		       u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = add_bufs(priv, ch, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}

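/* Issue a volatile dequeue command for the frames queued on this channel;
 * dequeued frames land in the channel's DPIO store, to be processed by
 * consume_frames(). Retries while the QBMan portal is busy.
 */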
static int pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	struct dpaa2_eth_priv *priv;
	int rx_cleaned = 0, txconf_cleaned = 0;
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	priv = ch->priv;

	do {
		err = pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		refill_pool(priv, ch, priv->bpid);

		store_cleaned = consume_frames(ch, &fq);
		if (!store_cleaned)
			break;
		if (fq->type == DPAA2_RX_FQ) {
			rx_cleaned += store_cleaned;
		} else {
			txconf_cleaned += store_cleaned;
			/* We have a single Tx conf FQ on this channel */
			txc_fq = fq;
		}

		/* If we either consumed the whole NAPI budget with Rx frames
		 * or we reached the Tx confirmations threshold, we're done.
		 */
		if (rx_cleaned >= budget ||
		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
			work_done = budget;
			goto out;
		}
	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and
	 * re-enable data availability notifications
	 */
	napi_complete_done(napi, rx_cleaned);
	do {
		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
		cpu_relax();
	} while (err == -EBUSY);
	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
		  ch->nctx.desired_cpu);

	work_done = max(rx_cleaned, 1);

out:
	if (txc_fq) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
					  txc_fq->dq_bytes);
		txc_fq->dq_frames = 0;
		txc_fq->dq_bytes = 0;
	}

	return work_done;
}

static void enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}

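/* Synchronize the netdev carrier state with the link state reported by
 * the firmware; Tx queues are only started while the link is up.
 */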
1111static int link_state_update(struct dpaa2_eth_priv *priv)
1112{
Ioana Ciornei85b7a342018-10-12 16:27:33 +00001113 struct dpni_link_state state = {0};
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001114 int err;
1115
1116 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
1117 if (unlikely(err)) {
1118 netdev_err(priv->net_dev,
1119 "dpni_get_link_state() failed\n");
1120 return err;
1121 }
1122
1123 /* Chech link state; speed / duplex changes are not treated yet */
1124 if (priv->link_state.up == state.up)
1125 return 0;
1126
1127 priv->link_state = state;
1128 if (state.up) {
1129 netif_carrier_on(priv->net_dev);
1130 netif_tx_start_all_queues(priv->net_dev);
1131 } else {
1132 netif_tx_stop_all_queues(priv->net_dev);
1133 netif_carrier_off(priv->net_dev);
1134 }
1135
Ioana Radulescu77160af2017-06-06 10:00:28 -05001136 netdev_info(priv->net_dev, "Link Event: state %s\n",
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001137 state.up ? "up" : "down");
1138
1139 return 0;
1140}
1141
1142static int dpaa2_eth_open(struct net_device *net_dev)
1143{
1144 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1145 int err;
1146
Ioana Radulescu05fa39c2017-06-06 10:00:37 -05001147 err = seed_pool(priv, priv->bpid);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001148 if (err) {
1149 /* Not much to do; the buffer pool, though not filled up,
1150 * may still contain some buffers which would enable us
1151 * to limp on.
1152 */
1153 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
Ioana Radulescu05fa39c2017-06-06 10:00:37 -05001154 priv->dpbp_dev->obj_desc.id, priv->bpid);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001155 }
1156
1157 /* We'll only start the txqs when the link is actually ready; make sure
1158 * we don't race against the link up notification, which may come
1159 * immediately after dpni_enable();
1160 */
1161 netif_tx_stop_all_queues(net_dev);
1162 enable_ch_napi(priv);
1163 /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
1164 * return true and cause 'ip link show' to report the LOWER_UP flag,
1165 * even though the link notification wasn't even received.
1166 */
1167 netif_carrier_off(net_dev);
1168
1169 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1170 if (err < 0) {
1171 netdev_err(net_dev, "dpni_enable() failed\n");
1172 goto enable_err;
1173 }
1174
1175 /* If the DPMAC object has already processed the link up interrupt,
1176 * we have to learn the link state ourselves.
1177 */
1178 err = link_state_update(priv);
1179 if (err < 0) {
1180 netdev_err(net_dev, "Can't update link state\n");
1181 goto link_state_err;
1182 }
1183
1184 return 0;
1185
1186link_state_err:
1187enable_err:
1188 disable_ch_napi(priv);
1189 drain_pool(priv);
1190 return err;
1191}
1192
1193/* The DPIO store must be empty when we call this,
1194 * at the end of every NAPI cycle.
1195 */
Ioana Ciorneifdb6ca92018-10-12 16:27:35 +00001196static u32 drain_channel(struct dpaa2_eth_channel *ch)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001197{
1198 u32 drained = 0, total = 0;
1199
1200 do {
1201 pull_channel(ch);
Ioana Ciocoi Radulescu68049a52018-10-08 14:16:31 +00001202 drained = consume_frames(ch, NULL);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001203 total += drained;
1204 } while (drained);
1205
1206 return total;
1207}
1208
1209static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
1210{
1211 struct dpaa2_eth_channel *ch;
1212 int i;
1213 u32 drained = 0;
1214
1215 for (i = 0; i < priv->num_channels; i++) {
1216 ch = priv->channel[i];
Ioana Ciorneifdb6ca92018-10-12 16:27:35 +00001217 drained += drain_channel(ch);
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001218 }
1219
1220 return drained;
1221}
1222
1223static int dpaa2_eth_stop(struct net_device *net_dev)
1224{
1225 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
Ioana Ciornei85b7a342018-10-12 16:27:33 +00001226 int dpni_enabled = 0;
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001227 int retries = 10;
1228 u32 drained;
1229
1230 netif_tx_stop_all_queues(net_dev);
1231 netif_carrier_off(net_dev);
1232
1233 /* Loop while dpni_disable() attempts to drain the egress FQs
1234 * and confirm them back to us.
1235 */
1236 do {
1237 dpni_disable(priv->mc_io, 0, priv->mc_token);
1238 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1239 if (dpni_enabled)
1240 /* Allow the hardware some slack */
1241 msleep(100);
1242 } while (dpni_enabled && --retries);
1243 if (!retries) {
1244 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1245 /* Must go on and disable NAPI nonetheless, so we don't crash at
1246 * the next "ifconfig up"
1247 */
1248 }
1249
1250 /* Wait for NAPI to complete on every core and disable it.
1251 * In particular, this will also prevent NAPI from being rescheduled if
1252 * a new CDAN is serviced, effectively discarding the CDAN. We therefore
1253 * don't even need to disarm the channels, except perhaps for the case
1254 * of a huge coalescing value.
1255 */
1256 disable_ch_napi(priv);
1257
1258 /* Manually drain the Rx and TxConf queues */
1259 drained = drain_ingress_frames(priv);
1260 if (drained)
1261 netdev_dbg(net_dev, "Drained %d frames.\n", drained);
1262
1263 /* Empty the buffer pool */
1264 drain_pool(priv);
1265
1266 return 0;
1267}
1268
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001269static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1270{
1271 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1272 struct device *dev = net_dev->dev.parent;
1273 int err;
1274
1275 err = eth_mac_addr(net_dev, addr);
1276 if (err < 0) {
1277 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1278 return err;
1279 }
1280
1281 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1282 net_dev->dev_addr);
1283 if (err) {
1284 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1285 return err;
1286 }
1287
1288 return 0;
1289}
1290
1291/** Fill in counters maintained by the GPP driver. These may be different from
1292 * the hardware counters obtained by ethtool.
1293 */
Ioana Radulescuacbff8e2017-06-06 10:00:24 -05001294static void dpaa2_eth_get_stats(struct net_device *net_dev,
1295 struct rtnl_link_stats64 *stats)
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001296{
1297 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1298 struct rtnl_link_stats64 *percpu_stats;
1299 u64 *cpustats;
1300 u64 *netstats = (u64 *)stats;
1301 int i, j;
1302 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1303
1304 for_each_possible_cpu(i) {
1305 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1306 cpustats = (u64 *)percpu_stats;
1307 for (j = 0; j < num; j++)
1308 netstats[j] += cpustats[j];
1309 }
1310}
1311
Ioana Radulescu6e2387e2017-04-28 04:50:29 -05001312/* Copy mac unicast addresses from @net_dev to @priv.
1313 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1314 */
1315static void add_uc_hw_addr(const struct net_device *net_dev,
1316 struct dpaa2_eth_priv *priv)
1317{
1318 struct netdev_hw_addr *ha;
1319 int err;
1320
1321 netdev_for_each_uc_addr(ha, net_dev) {
1322 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1323 ha->addr);
1324 if (err)
1325 netdev_warn(priv->net_dev,
1326 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1327 ha->addr, err);
1328 }
1329}
1330
1331/* Copy mac multicast addresses from @net_dev to @priv
1332 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1333 */
1334static void add_mc_hw_addr(const struct net_device *net_dev,
1335 struct dpaa2_eth_priv *priv)
1336{
1337 struct netdev_hw_addr *ha;
1338 int err;
1339
1340 netdev_for_each_mc_addr(ha, net_dev) {
1341 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1342 ha->addr);
1343 if (err)
1344 netdev_warn(priv->net_dev,
1345 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1346 ha->addr, err);
1347 }
1348}
1349
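/* Reprogram the hardware MAC filtering tables and promiscuous mode to
 * match the net_device state. Filter table rebuilds are done with
 * promiscuous mode temporarily forced on, so legitimate frames are not
 * lost while the tables are incomplete.
 */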
static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int uc_count = netdev_uc_count(net_dev);
	int mc_count = netdev_mc_count(net_dev);
	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
	u32 options = priv->dpni_attrs.options;
	u16 mc_token = priv->mc_token;
	struct fsl_mc_io *mc_io = priv->mc_io;
	int err;

	/* Basic sanity checks; these probably indicate a misconfiguration */
	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
		netdev_info(net_dev,
			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
			    max_mac);

	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
	if (uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count, max_mac);
		goto force_promisc;
	}
	if (mc_count + uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count + mc_count, max_mac);
		goto force_mc_promisc;
	}

	/* Adjust promisc settings due to flag combinations */
	if (net_dev->flags & IFF_PROMISC)
		goto force_promisc;
	if (net_dev->flags & IFF_ALLMULTI) {
		/* First, rebuild unicast filtering table. This should be done
		 * in promisc mode, in order to avoid frame loss while we
		 * progressively add entries to the table.
		 * We don't know whether we had been in promisc already, and
		 * making an MC call to find out is expensive; so set uc promisc
		 * nonetheless.
		 */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
		if (err)
			netdev_warn(net_dev, "Can't set uc promisc\n");

		/* Actual uc table reconstruction. */
		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc filters\n");
		add_uc_hw_addr(net_dev, priv);

		/* Finally, clear uc promisc and set mc promisc as requested. */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc promisc\n");
		goto force_mc_promisc;
	}

	/* Neither unicast, nor multicast promisc will be on... eventually.
	 * For now, rebuild mac filtering tables while forcing both of them on.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);

	/* Actual mac filtering tables reconstruction */
	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
	if (err)
		netdev_warn(net_dev, "Can't clear mac filters\n");
	add_mc_hw_addr(net_dev, priv);
	add_uc_hw_addr(net_dev, priv);

	/* Now we can clear both ucast and mcast promisc, without risking
	 * to drop legitimate frames anymore.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear ucast promisc\n");
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear mcast promisc\n");

	return;

force_promisc:
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set ucast promisc\n");
force_mc_promisc:
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mcast promisc\n");
}

static int dpaa2_eth_set_features(struct net_device *net_dev,
				  netdev_features_t features)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	netdev_features_t changed = features ^ net_dev->features;
	bool enable;
	int err;

	if (changed & NETIF_F_RXCSUM) {
		enable = !!(features & NETIF_F_RXCSUM);
		err = set_rx_csum(priv, enable);
		if (err)
			return err;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
		err = set_tx_csum(priv, enable);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->tx_tstamp = false;
		break;
	case HWTSTAMP_TX_ON:
		priv->tx_tstamp = true;
		break;
	default:
		return -ERANGE;
	}

	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
		priv->rx_tstamp = false;
	} else {
		priv->rx_tstamp = true;
		/* TS is set for all frame types, not only those requested */
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	}

	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
			-EFAULT : 0;
}

static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	if (cmd == SIOCSHWTSTAMP)
		return dpaa2_eth_ts_ioctl(dev, rq, cmd);

	return -EINVAL;
}

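/* A worked example for the check below, using illustrative values only:
 * with a 2 KiB Rx buffer, the hardware annotation area, the configured
 * headroom and XDP_PACKET_HEADROOM together consume a few hundred bytes,
 * so the largest frame that still fits linearly in one buffer (and hence
 * the largest MTU usable with XDP) ends up well below 2 KiB. Exact numbers
 * depend on the platform configuration.
 */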
static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
{
	int mfl, linear_mfl;

	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;

	if (mfl > linear_mfl) {
		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
			    linear_mfl - VLAN_ETH_HLEN);
		return false;
	}

	return true;
}

static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
{
	int mfl, err;

	/* We enforce a maximum Rx frame length based on MTU only if we have
	 * an XDP program attached (in order to avoid Rx S/G frames).
	 * Otherwise, we accept all incoming frames as long as they are not
	 * larger than the maximum size supported by the hardware.
	 */
	if (has_xdp)
		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	else
		mfl = DPAA2_ETH_MFL;

	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	int err;

	if (!priv->xdp_prog)
		goto out;

	if (!xdp_mtu_valid(priv, new_mtu))
		return -EINVAL;

	err = set_rx_mfl(priv, new_mtu, true);
	if (err)
		return err;

out:
	dev->mtu = new_mtu;
	return 0;
}

static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{
	struct dpni_buffer_layout buf_layout = {0};
	int err;

	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
		return err;
	}

	/* Reserve extra headroom for XDP header size changes */
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
		return err;
	}

	return 0;
}

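/* The layout change above only affects buffers seeded into the pool after
 * this call returns; its caller, setup_xdp() below, relies on
 * dpaa2_eth_stop() having drained the buffer pool beforehand, so no buffer
 * with the old layout can still be in flight.
 */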
static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_eth_channel *ch;
	struct bpf_prog *old;
	bool up, need_update;
	int i, err;

	if (prog && !xdp_mtu_valid(priv, dev->mtu))
		return -EINVAL;

	if (prog) {
		prog = bpf_prog_add(prog, priv->num_channels);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	up = netif_running(dev);
	need_update = (!!priv->xdp_prog != !!prog);

	if (up)
		dpaa2_eth_stop(dev);

	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
	 * so we are sure no old format buffers will be used from now on.
	 */
	if (need_update) {
		err = set_rx_mfl(priv, dev->mtu, !!prog);
		if (err)
			goto out_err;
		err = update_rx_buffer_headroom(priv, !!prog);
		if (err)
			goto out_err;
	}

	old = xchg(&priv->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		old = xchg(&ch->xdp.prog, prog);
		if (old)
			bpf_prog_put(old);
	}

	if (up) {
		err = dpaa2_eth_open(dev);
		if (err)
			return err;
	}

	return 0;

out_err:
	if (prog)
		bpf_prog_sub(prog, priv->num_channels);
	if (up)
		dpaa2_eth_open(dev);

	return err;
}

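/* A note on the program refcounting scheme in setup_xdp() above:
 * bpf_prog_add() takes one reference per channel, on top of the reference
 * the core already holds for priv->xdp_prog, and each old program drops
 * its own reference via bpf_prog_put() as it is swapped out with xchg().
 * This way the Rx hot path can use ch->xdp.prog without extra locking.
 */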
static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return setup_xdp(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct net_device_ops dpaa2_eth_ops = {
	.ndo_open = dpaa2_eth_open,
	.ndo_start_xmit = dpaa2_eth_tx,
	.ndo_stop = dpaa2_eth_stop,
	.ndo_set_mac_address = dpaa2_eth_set_addr,
	.ndo_get_stats64 = dpaa2_eth_get_stats,
	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
	.ndo_set_features = dpaa2_eth_set_features,
	.ndo_do_ioctl = dpaa2_eth_ioctl,
	.ndo_change_mtu = dpaa2_eth_change_mtu,
	.ndo_bpf = dpaa2_eth_xdp,
};

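/* From userspace, the XDP hooks above are typically exercised via
 * iproute2 (illustrative invocation, assuming a compiled BPF object file):
 *
 *   ip link set dev <iface> xdp obj prog.o    # attach -> setup_xdp()
 *   ip link set dev <iface> xdp off           # detach
 */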
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_eth_channel *ch;

	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);

	/* Update NAPI statistics */
	ch->stats.cdan++;

	napi_schedule_irqoff(&ch->napi);
}

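/* Background, as described in the DPAA2 documentation: a DPCON
 * ("concentrator") aggregates frame availability notifications from one or
 * more frame queues into a single channel that one DPIO, and hence one
 * core, services. The driver allocates one DPCON per channel below.
 */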
/* Allocate and configure a DPCON object */
static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpcon;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpcon_attr attrs;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
				     FSL_MC_POOL_DPCON, &dpcon);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
		return ERR_PTR(err);
	}

	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_open() failed\n");
		goto free;
	}

	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_reset() failed\n");
		goto close;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto close;
	}

	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_enable() failed\n");
		goto close;
	}

	return dpcon;

close:
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
free:
	fsl_mc_object_free(dpcon);

	return ERR_PTR(err);
}

static void free_dpcon(struct dpaa2_eth_priv *priv,
		       struct fsl_mc_device *dpcon)
{
	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
	fsl_mc_object_free(dpcon);
}

static struct dpaa2_eth_channel *
alloc_channel(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *channel;
	struct dpcon_attr attr;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->dpcon = setup_dpcon(priv);
	if (IS_ERR_OR_NULL(channel->dpcon)) {
		err = PTR_ERR(channel->dpcon);
		goto err_setup;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
				   &attr);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	channel->dpcon_id = attr.id;
	channel->ch_id = attr.qbman_ch_id;
	channel->priv = priv;

	return channel;

err_get_attr:
	free_dpcon(priv, channel->dpcon);
err_setup:
	kfree(channel);
	return ERR_PTR(err);
}

static void free_channel(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *channel)
{
	free_dpcon(priv, channel->dpcon);
	kfree(channel);
}

/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
static int setup_dpio(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_eth_channel *channel;
	struct dpcon_notification_cfg dpcon_notif_cfg;
	struct device *dev = priv->net_dev->dev.parent;
	int i, err;

	/* We want the ability to spread ingress traffic (RX, TX conf) to as
	 * many cores as possible, so we need one channel for each core
	 * (unless there are fewer queues than cores, in which case the extra
	 * channels would be wasted).
	 * Allocate one channel per core and register it to the core's
	 * affine DPIO. If not enough channels are available for all cores
	 * or if some cores don't have an affine DPIO, there will be no
	 * ingress frame processing on those cores.
	 */
	cpumask_clear(&priv->dpio_cpumask);
	for_each_online_cpu(i) {
		/* Try to allocate a channel */
		channel = alloc_channel(priv);
		if (IS_ERR_OR_NULL(channel)) {
			err = PTR_ERR(channel);
			if (err != -EPROBE_DEFER)
				dev_info(dev,
					 "No affine channel for cpu %d and above\n", i);
			goto err_alloc_ch;
		}

		priv->channel[priv->num_channels] = channel;

		nctx = &channel->nctx;
		nctx->is_cdan = 1;
		nctx->cb = cdan_cb;
		nctx->id = channel->ch_id;
		nctx->desired_cpu = i;

		/* Register the new context */
		channel->dpio = dpaa2_io_service_select(i);
		err = dpaa2_io_service_register(channel->dpio, nctx);
		if (err) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
			/* If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err_service_reg;
		}

		/* Register DPCON notification with MC */
		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
		dpcon_notif_cfg.priority = 0;
		dpcon_notif_cfg.user_ctx = nctx->qman64;
		err = dpcon_set_notification(priv->mc_io, 0,
					     channel->dpcon->mc_handle,
					     &dpcon_notif_cfg);
		if (err) {
			dev_err(dev, "dpcon_set_notification() failed\n");
			goto err_set_cdan;
		}

		/* If we managed to allocate a channel and also found an affine
		 * DPIO for this core, add it to the final mask
		 */
		cpumask_set_cpu(i, &priv->dpio_cpumask);
		priv->num_channels++;

		/* Stop if we already have enough channels to accommodate all
		 * RX and TX conf queues
		 */
		if (priv->num_channels == priv->dpni_attrs.num_queues)
			break;
	}

	return 0;

err_set_cdan:
	dpaa2_io_service_deregister(channel->dpio, nctx);
err_service_reg:
	free_channel(priv, channel);
err_alloc_ch:
	if (err == -EPROBE_DEFER)
		return err;

	if (cpumask_empty(&priv->dpio_cpumask)) {
		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
		return -ENODEV;
	}

	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
		 cpumask_pr_args(&priv->dpio_cpumask));

	return 0;
}

static void free_dpio(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	/* deregister CDAN notifications and free channels */
	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
		free_channel(priv, ch);
	}
}

static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
						    int cpu)
{
	struct device *dev = priv->net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->nctx.desired_cpu == cpu)
			return priv->channel[i];

	/* We should never get here. Issue a warning and return
	 * the first channel, because it's still better than nothing
	 */
	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);

	return priv->channel[0];
}

static void set_fq_affinity(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct cpumask xps_mask;
	struct dpaa2_eth_fq *fq;
	int rx_cpu, txc_cpu;
	int i, err;

	/* For each FQ, pick one channel/CPU to deliver frames to.
	 * This may well change at runtime, either through irqbalance or
	 * through direct user intervention.
	 */
	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		switch (fq->type) {
		case DPAA2_RX_FQ:
			fq->target_cpu = rx_cpu;
			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
			if (rx_cpu >= nr_cpu_ids)
				rx_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		case DPAA2_TX_CONF_FQ:
			fq->target_cpu = txc_cpu;

			/* Tell the stack to affine to txc_cpu the Tx queue
			 * associated with the confirmation one
			 */
			cpumask_clear(&xps_mask);
			cpumask_set_cpu(txc_cpu, &xps_mask);
			err = netif_set_xps_queue(priv->net_dev, &xps_mask,
						  fq->flowid);
			if (err)
				dev_err(dev, "Error setting XPS queue\n");

			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
			if (txc_cpu >= nr_cpu_ids)
				txc_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		default:
			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
		}
		fq->channel = get_affine_channel(priv, fq->target_cpu);
	}
}

static void setup_fqs(struct dpaa2_eth_priv *priv)
{
	int i;

	/* We have one TxConf FQ per Tx flow.
	 * The number of Tx and Rx queues is the same.
	 * Tx queues come first in the fq array.
	 */
	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	/* For each FQ, decide on which core to process incoming frames */
	set_fq_affinity(priv);
}

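/* Resulting fq[] layout, sketched for a hypothetical DPNI with 4 queue
 * pairs:
 *   fq[0..3]: DPAA2_TX_CONF_FQ, flowids 0..3
 *   fq[4..7]: DPAA2_RX_FQ,      flowids 0..3
 * set_fq_affinity() then binds each entry to the channel affine to its
 * target CPU.
 */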
/* Allocate and configure one buffer pool for each interface */
static int setup_dpbp(struct dpaa2_eth_priv *priv)
{
	int err;
	struct fsl_mc_device *dpbp_dev;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpbp_attr dpbp_attrs;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}

	priv->dpbp_dev = dpbp_dev;

	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}
	priv->bpid = dpbp_attrs.bpid;

	return 0;

err_get_attr:
	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);

	return err;
}

static void free_dpbp(struct dpaa2_eth_priv *priv)
{
	drain_pool(priv);
	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	fsl_mc_object_free(priv->dpbp_dev);
}

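/* A DPBP is the DPAA2 buffer pool object: every Rx buffer for this
 * interface is seeded into, and acquired from, the single pool set up
 * above; bind_dpni() later points the DPNI's Rx path at it via priv->bpid.
 */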
static int set_buffer_layout(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_buffer_layout buf_layout = {0};
	int err;

	/* We need to check for WRIOP version 1.0.0, but depending on the MC
	 * version, this number is not always provided correctly on rev1.
	 * We need to check for both alternatives in this situation.
	 */
	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
		priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
	else
		priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;

	/* tx buffer */
	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
	buf_layout.pass_timestamp = true;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
		return err;
	}

	/* tx-confirm buffer */
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
		return err;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
				      &priv->tx_data_offset);
	if (err) {
		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
		return err;
	}

	if ((priv->tx_data_offset % 64) != 0)
		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
			 priv->tx_data_offset);

	/* rx buffer */
	buf_layout.pass_frame_status = true;
	buf_layout.pass_parser_result = true;
	buf_layout.data_align = priv->rx_buf_align;
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
	buf_layout.private_data_size = 0;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
		return err;
	}

	return 0;
}

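/* Rough sketch of the resulting Rx buffer layout (field sizes are
 * configuration dependent, shown only schematically):
 *
 *   [ HW annotation | headroom (+ XDP room when attached) | frame data ]
 *
 * On Tx, priv->tx_data_offset retrieved above tells us where frame data
 * must start inside the buffers we hand to the hardware.
 */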
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_eth_priv *priv;
	struct net_device *net_dev;
	int err;

	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	/* get a handle for the DPNI object */
	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_open() failed\n");
		return err;
	}

	/* Check if we can work with this DPNI object */
	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
				   &priv->dpni_ver_minor);
	if (err) {
		dev_err(dev, "dpni_get_api_version() failed\n");
		goto close;
	}
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
			priv->dpni_ver_major, priv->dpni_ver_minor,
			DPNI_VER_MAJOR, DPNI_VER_MINOR);
		err = -ENOTSUPP;
		goto close;
	}

	ls_dev->mc_io = priv->mc_io;
	ls_dev->mc_handle = priv->mc_token;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_reset() failed\n");
		goto close;
	}

	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
				  &priv->dpni_attrs);
	if (err) {
		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
		goto close;
	}

	err = set_buffer_layout(priv);
	if (err)
		goto close;

	priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
				       sizeof(struct dpaa2_eth_cls_rule),
				       GFP_KERNEL);
	if (!priv->cls_rules) {
		err = -ENOMEM;
		goto close;
	}

	return 0;

close:
	dpni_close(priv->mc_io, 0, priv->mc_token);

	return err;
}

static void free_dpni(struct dpaa2_eth_priv *priv)
{
	int err;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err)
		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
			    err);

	dpni_close(priv->mc_io, 0, priv->mc_token);
}

static int setup_rx_flow(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	struct dpni_taildrop td;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(RX) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 1;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(RX) failed\n");
		return err;
	}

	td.enable = 1;
	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
	err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
				DPNI_QUEUE_RX, 0, fq->flowid, &td);
	if (err) {
		dev_err(dev, "dpni_set_taildrop() failed\n");
		return err;
	}

	return 0;
}

static int setup_tx_flow(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(TX) failed\n");
		return err;
	}

	fq->tx_qdbin = qid.qdbin;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 0;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
		return err;
	}

	return 0;
}

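/* Note that setup_tx_flow() records two distinct identifiers: tx_qdbin is
 * the queuing destination bin used when enqueueing egress frames, while
 * fq->fqid names the Tx confirmation queue completions are dequeued from;
 * only the latter is steered to a DPCON channel.
 */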
/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.size = 2,
	},
};

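/* The same table drives both key types: the Rx hash key is built from only
 * the fields selected through RXH_* flags, while the flow classification
 * key always contains every field above (see dpaa2_eth_set_dist_key()).
 */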
/* Configure the Rx hash key using the legacy API */
static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_tc_dist_cfg dist_cfg;
	int err;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_tc_dist failed\n");

	return err;
}

/* Configure the Rx hash key using the new API */
static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int err;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_hash_dist failed\n");

	return err;
}

/* Configure the Rx flow classification key */
static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int err;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_fs_dist failed\n");

	return err;
}

/* Size of the Rx flow classification key */
int dpaa2_eth_cls_key_size(void)
{
	int i, size = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		size += dist_fields[i].size;

	return size;
}

/* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
	int i, off = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].cls_prot == prot &&
		    dist_fields[i].cls_field == field)
			return off;
		off += dist_fields[i].size;
	}

	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
	return 0;
}

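/* Worked example: with the dist_fields[] table above, the IPv4 destination
 * address sits at key offset 6 + 6 + 2 + 2 + 4 = 20 (after ETH_DA, ETH_SA,
 * ETH_TYPE, VLAN_TCI and IP_SRC), and the total key size is 29 bytes.
 */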
/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
				  enum dpaa2_eth_rx_dist type, u64 flags)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpkg_profile_cfg cls_cfg;
	u32 rx_hash_fields = 0;
	dma_addr_t key_iova;
	u8 *dma_mem;
	int i;
	int err = 0;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		struct dpkg_extract *key =
			&cls_cfg.extracts[cls_cfg.num_extracts];

		/* For Rx hashing key we set only the selected fields.
		 * For Rx flow classification key we set all supported fields
		 */
		if (type == DPAA2_ETH_RX_DIST_HASH) {
			if (!(flags & dist_fields[i].rxnfc_field))
				continue;
			rx_hash_fields |= dist_fields[i].rxnfc_field;
		}

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			dev_err(dev, "error adding key extraction rule, too many rules?\n");
			return -E2BIG;
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
		goto free_key;
	}

	/* Prepare for setting the rx dist */
	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		dev_err(dev, "DMA mapping failed\n");
		err = -ENOMEM;
		goto free_key;
	}

	if (type == DPAA2_ETH_RX_DIST_HASH) {
		if (dpaa2_eth_has_legacy_dist(priv))
			err = config_legacy_hash_key(priv, key_iova);
		else
			err = config_hash_key(priv, key_iova);
	} else {
		err = config_cls_key(priv, key_iova);
	}

	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
		priv->rx_hash_fields = rx_hash_fields;

free_key:
	kfree(dma_mem);
	return err;
}

int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (!dpaa2_eth_hash_enabled(priv))
		return -EOPNOTSUPP;

	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
}

static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
	    !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}

	priv->rx_cls_enabled = 1;

	return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
}

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int bind_dpni(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct dpni_pools_cfg pools_params;
	struct dpni_error_cfg err_cfg;
	int err = 0;
	int i;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		dev_err(dev, "dpni_set_pools() failed\n");
		return err;
	}

	/* have the interface implicitly distribute traffic based on
	 * the default hash key
	 */
	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure hashing\n");

	/* Configure the flow classification key; it includes all
	 * supported header fields and cannot be modified at runtime
	 */
	err = dpaa2_eth_set_cls(priv);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure Rx classification key\n");

	/* Configure handling of error frames */
	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
	err_cfg.set_frame_annotation = 1;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
				       &err_cfg);
	if (err) {
		dev_err(dev, "dpni_set_errors_behavior failed\n");
		return err;
	}

	/* Configure Rx and Tx conf queues to generate CDANs */
	for (i = 0; i < priv->num_fqs; i++) {
		switch (priv->fq[i].type) {
		case DPAA2_RX_FQ:
			err = setup_rx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_TX_CONF_FQ:
			err = setup_tx_flow(priv, &priv->fq[i]);
			break;
		default:
			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
			return -EINVAL;
		}
		if (err)
			return err;
	}

	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
			    DPNI_QUEUE_TX, &priv->tx_qdid);
	if (err) {
		dev_err(dev, "dpni_get_qdid() failed\n");
		return err;
	}

	return 0;
}

/* Allocate rings for storing incoming frame descriptors */
static int alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

static void free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}

static int set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	return 0;
}

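/* MAC address precedence implemented above, summarized:
 *   1. a non-zero port address provided by firmware/bootloader, which is
 *      also written back to the DPNI if the two disagree;
 *   2. otherwise, a non-zero address already set on the DPNI;
 *   3. otherwise, a randomly generated address, persisted to the DPNI and
 *      reported as NET_ADDR_PERM.
 */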
static int netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX;
	net_dev->hw_features = net_dev->features;

	return 0;
}

static int poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		link_state_update(netdev_priv(net_dev));

	return IRQ_HANDLED;
}

static int setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

static void add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}

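/* Probe sequence implemented below, in order: MC portal -> DPNI -> DPIO
 * channels -> FQ setup -> DPBP -> bind -> NAPI -> per-cpu stats -> netdev
 * init and checksum offload -> rings -> link IRQ (with a kthread polling
 * fallback) -> register_netdev(). The error labels unwind the same steps
 * in reverse.
 */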
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	setup_fqs(priv);

	err = setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	err = netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = set_tx_csum(priv, !!(net_dev->features &
				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

	err = setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	del_ch_napi(priv);
err_bind:
	free_dpbp(priv);
err_dpbp_setup:
	free_dpio(priv);
err_dpio_setup:
	free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	free_rings(priv);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	del_ch_napi(priv);
	free_dpbp(priv);
	free_dpio(priv);
	free_dpni(priv);

	fsl_mc_portal_free(priv->mc_io);

	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);

	return 0;
}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

module_fsl_mc_driver(dpaa2_eth_driver);