// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/sock.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

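/* Convert an IOVA found in a frame descriptor back to a virtual address.
 * If the device sits behind an IOMMU, translate through the IOMMU domain
 * first; otherwise the IOVA is already a physical address.
 */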
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

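/* Propagate the hardware Rx checksum validation result to the skb, if Rx
 * checksum offload is enabled and both L3 and L4 checksums were validated.
 */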
static void validate_rx_csum(struct dpaa2_eth_priv *priv,
			     u32 fd_status,
			     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void free_rx_fd(struct dpaa2_eth_priv *priv,
		       const struct dpaa2_fd *fd,
		       void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
					const struct dpaa2_fd *fd,
					void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}

/* Build a non-linear (fragmented) skb based on an S/G table */
static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (!dpaa2_sg_is_final(&sgt[i]) &&
				       i < DPAA2_ETH_MAX_SG_ENTRIES)
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				      (page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, priv->rx_buf_size);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vaddr, 0);
	}
}

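/* Stash a buffer dropped by the XDP program and, once a full command's worth
 * (DPAA2_ETH_BUFS_PER_CMD) has accumulated, release the batch back into the
 * hardware buffer pool. If the release keeps failing, free the buffers to the
 * kernel instead.
 */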
static void xdp_release_buf(struct dpaa2_eth_priv *priv,
			    struct dpaa2_eth_channel *ch,
			    dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->xdp.drop_bufs,
					       ch->xdp.drop_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
		ch->buf_count -= ch->xdp.drop_cnt;
	}

	ch->xdp.drop_cnt = 0;
}

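/* Enqueue a batch of XDP frame descriptors on a Tx frame queue, retrying on
 * portal busy. Returns the number of FDs actually enqueued.
 */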
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	int num_fds, err, max_retries;
	struct dpaa2_fd *fds;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* try to enqueue all the FDs until the max number of retries is hit */
	fds = xdp_fds->fds;
	num_fds = xdp_fds->num;
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued],
				    0, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			percpu_extras->tx_portal_busy += ++retries;
			continue;
		}
		total_enqueued += enqueued;
	}
	xdp_fds->num = 0;

	return total_enqueued;
}

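/* Flush the XDP_TX frames pending on this Tx FQ and update statistics;
 * frames that could not be enqueued are recycled back to the buffer pool
 * and accounted as Tx errors.
 */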
static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_fd *fds;
	int enqueued, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	/* Enqueue the array of XDP_TX frames */
	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	fds = fq->xdp_tx_fds.fds;
	for (i = 0; i < enqueued; i++) {
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
		ch->stats.xdp_tx++;
	}
	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
		xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
		percpu_stats->tx_errors++;
		ch->stats.xdp_tx_err++;
	}
	fq->xdp_tx_fds.num = 0;
}

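/* Prepare an FD resulting from an XDP_TX action and add it to the per-FQ
 * bulk array; the array is flushed to hardware once DEV_MAP_BULK_SIZE
 * frames have been gathered.
 */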
static void xdp_enqueue(struct dpaa2_eth_priv *priv,
			struct dpaa2_eth_channel *ch,
			struct dpaa2_fd *fd,
			void *buf_start, u16 queue_id)
{
	struct dpaa2_faead *faead;
	struct dpaa2_fd *dest_fd;
	struct dpaa2_eth_fq *fq;
	u32 ctrl, frc;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
	memcpy(dest_fd, fd, sizeof(*dest_fd));

	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
		return;

	xdp_tx_flush(priv, ch, fq);
}

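/* Run the XDP program (if any) attached to this channel on a received frame
 * and carry out the resulting action. Returns the XDP verdict; anything
 * other than XDP_PASS means the frame was consumed here.
 */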
static u32 run_xdp(struct dpaa2_eth_priv *priv,
		   struct dpaa2_eth_channel *ch,
		   struct dpaa2_eth_fq *rx_fq,
		   struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err;

	rcu_read_lock();

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
	xdp_set_data_meta_invalid(&xdp);
	xdp.rxq = &ch->xdp_rxq;

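	/* frame_sz covers the buffer from data_hard_start up to the end of
	 * the raw Rx buffer, i.e. the full area the XDP program may use
	 */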
	xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
		(dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
		break;
	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		/* fall through */
	case XDP_DROP:
		xdp_release_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
		ch->buf_count--;

		/* Allow redirect use of full headroom */
		xdp.data_hard_start = vaddr;
		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;

		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err))
			ch->stats.xdp_drop++;
		else
			ch->stats.xdp_redirect++;
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	rcu_read_unlock();
	return xdp_act;
}

/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);

	return;

err_build_skb:
	free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int consume_frames(struct dpaa2_eth_channel *ch,
			  struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0, retries = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
		retries = 0;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;
	ch->stats.frames += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}

/* Configure the egress frame annotation for timestamp update */
static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
{
	struct dpaa2_faead *faead;
	u32 ctrl, frc;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* enable UPD (update prepended data) bit in FAEAD field of
	 * hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);
}

/* Create a frame descriptor based on a fragmented skb */
static int build_sg_fd(struct dpaa2_eth_priv *priv,
		       struct sk_buff *skb,
		       struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 * - offset is 0
	 * - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, sgt_buf);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

/* Create a SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a SG frame descriptor with only one entry.
 */
static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
				  struct sk_buff *skb,
				  struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr, sgt_addr;
	void *sgt_buf = NULL;
	int sgt_buf_size;
	int err;

	/* Prepare the HW SGT structure */
	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);

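	/* Reuse a cached SGT buffer for this CPU if one is available,
	 * otherwise allocate a fresh one
	 */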
	if (sgt_cache->count == 0)
		sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
				  GFP_ATOMIC);
	else
		sgt_buf = sgt_cache->buf[--sgt_cache->count];
	if (unlikely(!sgt_buf))
		return -ENOMEM;

	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto data_map_failed;
	}

	/* Fill in the HW SGT structure */
	dpaa2_sg_set_addr(sgt, addr);
	dpaa2_sg_set_len(sgt, skb->len);
	dpaa2_sg_set_final(sgt, true);

	/* Store the skb backpointer in the SGT buffer */
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;
	/* Tx conf reads single.sgt_size, so store the size in the same
	 * union member here
	 */
	swa->single.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
		err = -ENOMEM;
		goto sgt_map_failed;
	}

	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, sgt_addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, sgt_buf);

	return 0;

sgt_map_failed:
	dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
		kfree(sgt_buf);
	else
		sgt_cache->buf[sgt_cache->count++] = sgt_buf;

	return err;
}

/* Create a frame descriptor based on a linear skb */
static int build_single_fd(struct dpaa2_eth_priv *priv,
			   struct sk_buff *skb,
			   struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, buffer_start);

	return 0;
}

/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_fq *fq,
		       const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr, sg_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);

	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;

	fd_addr = dpaa2_fd_get_addr(fd);
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb = swa->sg.skb;

			/* Unmap the scatterlist */
			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
				     DMA_BIDIRECTIONAL);
			kfree(swa->sg.scl);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else {
			skb = swa->single.skb;

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
					 DMA_BIDIRECTIONAL);

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);
			sg_addr = dpaa2_sg_get_addr(sgt);
			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
		}
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single) {
		sgt_cache = this_cpu_ptr(priv->sgt_cache);
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb_free_frag(buffer_start);
		} else {
			if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
				kfree(buffer_start);
			else
				sgt_cache->buf[sgt_cache->count++] = buffer_start;
		}
	}

	/* Move on with skb release */
	napi_consume_skb(skb, in_napi);
}

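/* Main Tx routine: build a frame descriptor from the skb, select the Tx
 * frame queue based on the stack's queue mapping (and traffic class, if
 * configured) and enqueue it to hardware, retrying a few times on portal
 * busy.
 */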
static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	u8 prio = 0;
	int err, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = build_sg_fd(priv, skb, &fd);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else if (skb_headroom(skb) < needed_headroom) {
		err = build_sg_fd_single_buf(priv, skb, &fd);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		percpu_extras->tx_converted_sg_frames++;
		percpu_extras->tx_converted_sg_bytes += skb->len;
	} else {
		err = build_single_fd(priv, skb, &fd);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];

	fd_len = dpaa2_fd_get_len(&fd);
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueues might race with
	 * the Tx confirmation callback for this frame
	 */
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		free_tx_fd(priv, fq, &fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

| 1027 | /* Tx confirmation frame processing routine */ |
| 1028 | static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, |
Ioana Ciornei | b00c898 | 2018-10-12 16:27:38 +0000 | [diff] [blame] | 1029 | struct dpaa2_eth_channel *ch __always_unused, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1030 | const struct dpaa2_fd *fd, |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1031 | struct dpaa2_eth_fq *fq) |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1032 | { |
| 1033 | struct rtnl_link_stats64 *percpu_stats; |
Ioana Radulescu | 85047ab | 2017-04-28 04:50:31 -0500 | [diff] [blame] | 1034 | struct dpaa2_eth_drv_stats *percpu_extras; |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1035 | u32 fd_len = dpaa2_fd_get_len(fd); |
Ioana Radulescu | 39163c0 | 2017-06-06 10:00:39 -0500 | [diff] [blame] | 1036 | u32 fd_errors; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1037 | |
Ioana Radulescu | 5636187 | 2017-04-28 04:50:32 -0500 | [diff] [blame] | 1038 | /* Tracing point */ |
| 1039 | trace_dpaa2_tx_conf_fd(priv->net_dev, fd); |
| 1040 | |
Ioana Radulescu | 85047ab | 2017-04-28 04:50:31 -0500 | [diff] [blame] | 1041 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
| 1042 | percpu_extras->tx_conf_frames++; |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1043 | percpu_extras->tx_conf_bytes += fd_len; |
| 1044 | |
Ioana Radulescu | 39163c0 | 2017-06-06 10:00:39 -0500 | [diff] [blame] | 1045 | /* Check frame errors in the FD field */ |
| 1046 | fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 1047 | free_tx_fd(priv, fq, fd, true); |
Ioana Radulescu | 39163c0 | 2017-06-06 10:00:39 -0500 | [diff] [blame] | 1048 | |
| 1049 | if (likely(!fd_errors)) |
| 1050 | return; |
| 1051 | |
Ioana Radulescu | 2b7c86e | 2017-12-08 06:47:56 -0600 | [diff] [blame] | 1052 | if (net_ratelimit()) |
| 1053 | netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", |
| 1054 | fd_errors); |
| 1055 | |
Ioana Radulescu | 39163c0 | 2017-06-06 10:00:39 -0500 | [diff] [blame] | 1056 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
| 1057 | /* Tx-conf logically pertains to the egress path. */ |
| 1058 | percpu_stats->tx_errors++; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1059 | } |
| 1060 | |
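/* Enable or disable hardware validation of Rx L3 and L4 checksums via the
 * DPNI offload API; both offloads are always toggled together.
 */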
| 1061 | static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) |
| 1062 | { |
| 1063 | int err; |
| 1064 | |
| 1065 | err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, |
| 1066 | DPNI_OFF_RX_L3_CSUM, enable); |
| 1067 | if (err) { |
| 1068 | netdev_err(priv->net_dev, |
| 1069 | "dpni_set_offload(RX_L3_CSUM) failed\n"); |
| 1070 | return err; |
| 1071 | } |
| 1072 | |
| 1073 | err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, |
| 1074 | DPNI_OFF_RX_L4_CSUM, enable); |
| 1075 | if (err) { |
| 1076 | netdev_err(priv->net_dev, |
| 1077 | "dpni_set_offload(RX_L4_CSUM) failed\n"); |
| 1078 | return err; |
| 1079 | } |
| 1080 | |
| 1081 | return 0; |
| 1082 | } |
| 1083 | |
| 1084 | static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) |
| 1085 | { |
| 1086 | int err; |
| 1087 | |
| 1088 | err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, |
| 1089 | DPNI_OFF_TX_L3_CSUM, enable); |
| 1090 | if (err) { |
| 1091 | netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); |
| 1092 | return err; |
| 1093 | } |
| 1094 | |
| 1095 | err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, |
| 1096 | DPNI_OFF_TX_L4_CSUM, enable); |
| 1097 | if (err) { |
| 1098 | netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); |
| 1099 | return err; |
| 1100 | } |
| 1101 | |
| 1102 | return 0; |
| 1103 | } |
| 1104 | |
| 1105 | /* Perform a single release command to add buffers |
| 1106 | * to the specified buffer pool |
| 1107 | */ |
Ioana Radulescu | 7ec0596 | 2018-01-05 05:04:32 -0600 | [diff] [blame] | 1108 | static int add_bufs(struct dpaa2_eth_priv *priv, |
| 1109 | struct dpaa2_eth_channel *ch, u16 bpid) |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1110 | { |
| 1111 | struct device *dev = priv->net_dev->dev.parent; |
| 1112 | u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; |
Ioana Ciocoi Radulescu | 27c8748 | 2019-02-04 17:00:35 +0000 | [diff] [blame] | 1113 | struct page *page; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1114 | dma_addr_t addr; |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1115 | int retries = 0; |
Ioana Radulescu | 87eb55e | 2017-10-11 08:29:43 -0500 | [diff] [blame] | 1116 | int i, err; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1117 | |
| 1118 | for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { |
| 1122 | /* Allocate one page for each Rx buffer. WRIOP sees the entire |
| 1123 |  * page except for a tailroom reserved for the skb shared info |
| 1124 |  */ |
| 1126 | page = dev_alloc_pages(0); |
| 1127 | if (!page) |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1128 | goto err_alloc; |
| 1129 | |
Ioana Ciornei | efa6a7d | 2020-05-15 15:30:22 +0300 | [diff] [blame] | 1130 | addr = dma_map_page(dev, page, 0, priv->rx_buf_size, |
Ioana Ciocoi Radulescu | 27c8748 | 2019-02-04 17:00:35 +0000 | [diff] [blame] | 1131 | DMA_BIDIRECTIONAL); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1132 | if (unlikely(dma_mapping_error(dev, addr))) |
| 1133 | goto err_map; |
| 1134 | |
| 1135 | buf_array[i] = addr; |
Ioana Radulescu | 5636187 | 2017-04-28 04:50:32 -0500 | [diff] [blame] | 1136 | |
| 1137 | /* tracing point */ |
| 1138 | trace_dpaa2_eth_buf_seed(priv->net_dev, |
Ioana Ciocoi Radulescu | 27c8748 | 2019-02-04 17:00:35 +0000 | [diff] [blame] | 1139 | page, DPAA2_ETH_RX_BUF_RAW_SIZE, |
Ioana Ciornei | efa6a7d | 2020-05-15 15:30:22 +0300 | [diff] [blame] | 1140 | addr, priv->rx_buf_size, |
Ioana Radulescu | 5636187 | 2017-04-28 04:50:32 -0500 | [diff] [blame] | 1141 | bpid); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1142 | } |
| 1143 | |
| 1144 | release_bufs: |
Ioana Radulescu | 87eb55e | 2017-10-11 08:29:43 -0500 | [diff] [blame] | 1145 | /* In case the portal is busy, retry until successful */ |
Ioana Radulescu | 7ec0596 | 2018-01-05 05:04:32 -0600 | [diff] [blame] | 1146 | while ((err = dpaa2_io_service_release(ch->dpio, bpid, |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1147 | buf_array, i)) == -EBUSY) { |
| 1148 | if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) |
| 1149 | break; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1150 | cpu_relax(); |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1151 | } |
Ioana Radulescu | 87eb55e | 2017-10-11 08:29:43 -0500 | [diff] [blame] | 1152 | |
| 1153 | /* If release command failed, clean up and bail out; |
| 1154 | * not much else we can do about it |
| 1155 | */ |
| 1156 | if (err) { |
| 1157 | free_bufs(priv, buf_array, i); |
| 1158 | return 0; |
| 1159 | } |
| 1160 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1161 | return i; |
| 1162 | |
| 1163 | err_map: |
Ioana Ciocoi Radulescu | 27c8748 | 2019-02-04 17:00:35 +0000 | [diff] [blame] | 1164 | __free_pages(page, 0); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1165 | err_alloc: |
Ioana Radulescu | 87eb55e | 2017-10-11 08:29:43 -0500 | [diff] [blame] | 1166 | /* If we managed to allocate at least some buffers, |
| 1167 | * release them to hardware |
| 1168 | */ |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1169 | if (i) |
| 1170 | goto release_bufs; |
| 1171 | |
| 1172 | return 0; |
| 1173 | } |
| 1174 | |
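/* Pre-populate the buffer pool with DPAA2_ETH_NUM_BUFS buffers per channel,
 * released DPAA2_ETH_BUFS_PER_CMD at a time. A short count from add_bufs()
 * means allocation or release failed, so give up early with -ENOMEM.
 */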
| 1175 | static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) |
| 1176 | { |
| 1177 | int i, j; |
| 1178 | int new_count; |
| 1179 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1180 | for (j = 0; j < priv->num_channels; j++) { |
| 1181 | for (i = 0; i < DPAA2_ETH_NUM_BUFS; |
| 1182 | i += DPAA2_ETH_BUFS_PER_CMD) { |
Ioana Radulescu | 7ec0596 | 2018-01-05 05:04:32 -0600 | [diff] [blame] | 1183 | new_count = add_bufs(priv, priv->channel[j], bpid); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1184 | priv->channel[j]->buf_count += new_count; |
| 1185 | |
| 1186 | if (new_count < DPAA2_ETH_BUFS_PER_CMD) { |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1187 | return -ENOMEM; |
| 1188 | } |
| 1189 | } |
| 1190 | } |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1191 | |
| 1192 | return 0; |
| 1193 | } |
| 1194 | |
| 1195 | /* |
| 1196 |  * Drain the specified number of buffers from the DPNI's private buffer pool. |
| 1197 |  * @count must not exceed DPAA2_ETH_BUFS_PER_CMD |
| 1198 |  */ |
| 1199 | static void drain_bufs(struct dpaa2_eth_priv *priv, int count) |
| 1200 | { |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1201 | u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1202 | int retries = 0; |
Ioana Radulescu | 87eb55e | 2017-10-11 08:29:43 -0500 | [diff] [blame] | 1203 | int ret; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1204 | |
| 1205 | do { |
Ioana Radulescu | 05fa39c | 2017-06-06 10:00:37 -0500 | [diff] [blame] | 1206 | ret = dpaa2_io_service_acquire(NULL, priv->bpid, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1207 | buf_array, count); |
| 1208 | if (ret < 0) { |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1209 | if (ret == -EBUSY && |
Ioana Ciornei | 0e5ad75 | 2020-06-24 14:34:19 +0300 | [diff] [blame] | 1210 | retries++ < DPAA2_ETH_SWP_BUSY_RETRIES) |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1211 | continue; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1212 | netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); |
| 1213 | return; |
| 1214 | } |
Ioana Radulescu | 87eb55e | 2017-10-11 08:29:43 -0500 | [diff] [blame] | 1215 | free_bufs(priv, buf_array, ret); |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1216 | retries = 0; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1217 | } while (ret); |
| 1218 | } |
| 1219 | |
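/* Empty the DPNI's buffer pool: drain in batches of DPAA2_ETH_BUFS_PER_CMD,
 * then one buffer at a time for any remainder, and reset the per-channel
 * buffer counters.
 */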
| 1220 | static void drain_pool(struct dpaa2_eth_priv *priv) |
| 1221 | { |
| 1222 | int i; |
| 1223 | |
| 1224 | drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); |
| 1225 | drain_bufs(priv, 1); |
| 1226 | |
| 1227 | for (i = 0; i < priv->num_channels; i++) |
| 1228 | priv->channel[i]->buf_count = 0; |
| 1229 | } |
| 1230 | |
| 1231 | /* Function is called from softirq context only, so we don't need to guard |
| 1232 |  * access to the per-CPU buffer count |
| 1233 | */ |
| 1234 | static int refill_pool(struct dpaa2_eth_priv *priv, |
| 1235 | struct dpaa2_eth_channel *ch, |
| 1236 | u16 bpid) |
| 1237 | { |
| 1238 | int new_count; |
| 1239 | |
| 1240 | if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) |
| 1241 | return 0; |
| 1242 | |
| 1243 | do { |
Ioana Radulescu | 7ec0596 | 2018-01-05 05:04:32 -0600 | [diff] [blame] | 1244 | new_count = add_bufs(priv, ch, bpid); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1245 | if (unlikely(!new_count)) { |
| 1246 | /* Out of memory; abort for now, we'll try later on */ |
| 1247 | break; |
| 1248 | } |
| 1249 | ch->buf_count += new_count; |
| 1250 | } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); |
| 1251 | |
| 1252 | if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) |
| 1253 | return -ENOMEM; |
| 1254 | |
| 1255 | return 0; |
| 1256 | } |
| 1257 | |
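/* Free all scatter-gather table buffers cached on each CPU; called on
 * interface stop (see dpaa2_eth_stop()).
 */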
Ioana Ciornei | d70446e | 2020-06-29 21:47:11 +0300 | [diff] [blame] | 1258 | static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv) |
| 1259 | { |
| 1260 | struct dpaa2_eth_sgt_cache *sgt_cache; |
| 1261 | u16 count; |
| 1262 | int k, i; |
| 1263 | |
Ioana Ciornei | 0fe665d | 2020-07-06 17:55:54 +0300 | [diff] [blame] | 1264 | for_each_possible_cpu(k) { |
Ioana Ciornei | d70446e | 2020-06-29 21:47:11 +0300 | [diff] [blame] | 1265 | sgt_cache = per_cpu_ptr(priv->sgt_cache, k); |
| 1266 | count = sgt_cache->count; |
| 1267 | |
| 1268 | for (i = 0; i < count; i++) |
| 1269 | kfree(sgt_cache->buf[i]); |
| 1270 | sgt_cache->count = 0; |
| 1271 | } |
| 1272 | } |
| 1273 | |
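/* Pull a batch of frames from the QMan channel into its software store,
 * retrying while the software portal is busy.
 */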
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1274 | static int pull_channel(struct dpaa2_eth_channel *ch) |
| 1275 | { |
| 1276 | int err; |
Ioana Radulescu | 85047ab | 2017-04-28 04:50:31 -0500 | [diff] [blame] | 1277 | int dequeues = -1; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1278 | |
| 1279 | /* Retry while portal is busy */ |
| 1280 | do { |
Ioana Radulescu | 7ec0596 | 2018-01-05 05:04:32 -0600 | [diff] [blame] | 1281 | err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, |
| 1282 | ch->store); |
Ioana Radulescu | 85047ab | 2017-04-28 04:50:31 -0500 | [diff] [blame] | 1283 | dequeues++; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1284 | cpu_relax(); |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1285 | } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1286 | |
Ioana Radulescu | 85047ab | 2017-04-28 04:50:31 -0500 | [diff] [blame] | 1287 | ch->stats.dequeue_portal_busy += dequeues; |
| 1288 | if (unlikely(err)) |
| 1289 | ch->stats.pull_err++; |
| 1290 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1291 | return err; |
| 1292 | } |
| 1293 | |
| 1294 | /* NAPI poll routine |
| 1295 | * |
| 1296 | * Frames are dequeued from the QMan channel associated with this NAPI context. |
| 1297 | * Rx, Tx confirmation and (if configured) Rx error frames all count |
| 1298 | * towards the NAPI budget. |
| 1299 | */ |
| 1300 | static int dpaa2_eth_poll(struct napi_struct *napi, int budget) |
| 1301 | { |
| 1302 | struct dpaa2_eth_channel *ch; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1303 | struct dpaa2_eth_priv *priv; |
Ioana Ciocoi Radulescu | 68049a5 | 2018-10-08 14:16:31 +0000 | [diff] [blame] | 1304 | int rx_cleaned = 0, txconf_cleaned = 0; |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1305 | struct dpaa2_eth_fq *fq, *txc_fq = NULL; |
| 1306 | struct netdev_queue *nq; |
| 1307 | int store_cleaned, work_done; |
Ioana Ciornei | 0a25d92 | 2019-03-25 13:42:39 +0000 | [diff] [blame] | 1308 | struct list_head rx_list; |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1309 | int retries = 0; |
Ioana Ciornei | 74a1c05 | 2020-05-13 16:55:46 +0300 | [diff] [blame] | 1310 | u16 flowid; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1311 | int err; |
| 1312 | |
| 1313 | ch = container_of(napi, struct dpaa2_eth_channel, napi); |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 1314 | ch->xdp.res = 0; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1315 | priv = ch->priv; |
| 1316 | |
Ioana Ciornei | 0a25d92 | 2019-03-25 13:42:39 +0000 | [diff] [blame] | 1317 | INIT_LIST_HEAD(&rx_list); |
| 1318 | ch->rx_list = &rx_list; |
| 1319 | |
Ioana Ciocoi Radulescu | 68049a5 | 2018-10-08 14:16:31 +0000 | [diff] [blame] | 1320 | do { |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1321 | err = pull_channel(ch); |
| 1322 | if (unlikely(err)) |
| 1323 | break; |
| 1324 | |
| 1325 | /* Refill pool if appropriate */ |
Ioana Radulescu | 05fa39c | 2017-06-06 10:00:37 -0500 | [diff] [blame] | 1326 | refill_pool(priv, ch, priv->bpid); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1327 | |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1328 | store_cleaned = consume_frames(ch, &fq); |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1329 | if (store_cleaned <= 0) |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1330 | break; |
| 1331 | if (fq->type == DPAA2_RX_FQ) { |
Ioana Ciocoi Radulescu | 68049a5 | 2018-10-08 14:16:31 +0000 | [diff] [blame] | 1332 | rx_cleaned += store_cleaned; |
Ioana Ciornei | 74a1c05 | 2020-05-13 16:55:46 +0300 | [diff] [blame] | 1333 | flowid = fq->flowid; |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1334 | } else { |
Ioana Ciocoi Radulescu | 68049a5 | 2018-10-08 14:16:31 +0000 | [diff] [blame] | 1335 | txconf_cleaned += store_cleaned; |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1336 | /* We have a single Tx conf FQ on this channel */ |
| 1337 | txc_fq = fq; |
| 1338 | } |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1339 | |
Ioana Ciocoi Radulescu | 68049a5 | 2018-10-08 14:16:31 +0000 | [diff] [blame] | 1340 | /* If we either consumed the whole NAPI budget with Rx frames |
| 1341 | * or we reached the Tx confirmations threshold, we're done. |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1342 | */ |
Ioana Ciocoi Radulescu | 68049a5 | 2018-10-08 14:16:31 +0000 | [diff] [blame] | 1343 | if (rx_cleaned >= budget || |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1344 | txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) { |
| 1345 | work_done = budget; |
| 1346 | goto out; |
| 1347 | } |
Ioana Ciocoi Radulescu | 68049a5 | 2018-10-08 14:16:31 +0000 | [diff] [blame] | 1348 | } while (store_cleaned); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1349 | |
Ioana Ciocoi Radulescu | 68049a5 | 2018-10-08 14:16:31 +0000 | [diff] [blame] | 1350 | /* We didn't consume the entire budget, so finish napi and |
| 1351 | * re-enable data availability notifications |
| 1352 | */ |
| 1353 | napi_complete_done(napi, rx_cleaned); |
| 1354 | do { |
| 1355 | err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); |
| 1356 | cpu_relax(); |
Ioana Radulescu | ef17bd7 | 2019-10-07 14:38:28 +0300 | [diff] [blame] | 1357 | } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES); |
Ioana Ciocoi Radulescu | 68049a5 | 2018-10-08 14:16:31 +0000 | [diff] [blame] | 1358 | WARN_ONCE(err, "CDAN notifications rearm failed on core %d", |
| 1359 | ch->nctx.desired_cpu); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1360 | |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1361 | work_done = max(rx_cleaned, 1); |
| 1362 | |
| 1363 | out: |
Ioana Ciornei | 0a25d92 | 2019-03-25 13:42:39 +0000 | [diff] [blame] | 1364 | netif_receive_skb_list(ch->rx_list); |
| 1365 | |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 1366 | if (txc_fq && txc_fq->dq_frames) { |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1367 | nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); |
| 1368 | netdev_tx_completed_queue(nq, txc_fq->dq_frames, |
| 1369 | txc_fq->dq_bytes); |
| 1370 | txc_fq->dq_frames = 0; |
| 1371 | txc_fq->dq_bytes = 0; |
| 1372 | } |
| 1373 | |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 1374 | if (ch->xdp.res & XDP_REDIRECT) |
| 1375 | xdp_do_flush_map(); |
Ioana Ciornei | 74a1c05 | 2020-05-13 16:55:46 +0300 | [diff] [blame] | 1376 | else if (rx_cleaned && ch->xdp.res & XDP_TX) |
| 1377 | xdp_tx_flush(priv, ch, &priv->fq[flowid]); |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 1378 | |
Ioana Ciocoi Radulescu | 569dac6 | 2018-11-14 11:48:36 +0000 | [diff] [blame] | 1379 | return work_done; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1380 | } |
| 1381 | |
| 1382 | static void enable_ch_napi(struct dpaa2_eth_priv *priv) |
| 1383 | { |
| 1384 | struct dpaa2_eth_channel *ch; |
| 1385 | int i; |
| 1386 | |
| 1387 | for (i = 0; i < priv->num_channels; i++) { |
| 1388 | ch = priv->channel[i]; |
| 1389 | napi_enable(&ch->napi); |
| 1390 | } |
| 1391 | } |
| 1392 | |
| 1393 | static void disable_ch_napi(struct dpaa2_eth_priv *priv) |
| 1394 | { |
| 1395 | struct dpaa2_eth_channel *ch; |
| 1396 | int i; |
| 1397 | |
| 1398 | for (i = 0; i < priv->num_channels; i++) { |
| 1399 | ch = priv->channel[i]; |
| 1400 | napi_disable(&ch->napi); |
| 1401 | } |
| 1402 | } |
| 1403 | |
Ioana Ciornei | 07beb16 | 2020-05-31 00:08:14 +0300 | [diff] [blame] | 1404 | void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, |
| 1405 | bool tx_pause, bool pfc) |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1406 | { |
| 1407 | struct dpni_taildrop td = {0}; |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 1408 | struct dpaa2_eth_fq *fq; |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1409 | int i, err; |
| 1410 | |
Ioana Ciornei | 07beb16 | 2020-05-31 00:08:14 +0300 | [diff] [blame] | 1411 | /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if |
| 1412 | * flow control is disabled (as it might interfere with either the |
| 1413 | * buffer pool depletion trigger for pause frames or the group |
| 1414 | * congestion trigger for PFC frames) |
| 1415 | */ |
Ioana Radulescu | 2c8d1c8 | 2020-05-31 00:08:11 +0300 | [diff] [blame] | 1416 | td.enable = !tx_pause; |
Ioana Ciornei | 07beb16 | 2020-05-31 00:08:14 +0300 | [diff] [blame] | 1417 | if (priv->rx_fqtd_enabled == td.enable) |
| 1418 | goto set_cgtd; |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1419 | |
Ioana Radulescu | 2c8d1c8 | 2020-05-31 00:08:11 +0300 | [diff] [blame] | 1420 | td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH; |
| 1421 | td.units = DPNI_CONGESTION_UNIT_BYTES; |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1422 | |
| 1423 | for (i = 0; i < priv->num_fqs; i++) { |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 1424 | fq = &priv->fq[i]; |
| 1425 | if (fq->type != DPAA2_RX_FQ) |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1426 | continue; |
| 1427 | err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 1428 | DPNI_CP_QUEUE, DPNI_QUEUE_RX, |
| 1429 | fq->tc, fq->flowid, &td); |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1430 | if (err) { |
| 1431 | netdev_err(priv->net_dev, |
Ioana Radulescu | 2c8d1c8 | 2020-05-31 00:08:11 +0300 | [diff] [blame] | 1432 | "dpni_set_taildrop(FQ) failed\n"); |
| 1433 | return; |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1434 | } |
| 1435 | } |
| 1436 | |
Ioana Ciornei | 07beb16 | 2020-05-31 00:08:14 +0300 | [diff] [blame] | 1437 | priv->rx_fqtd_enabled = td.enable; |
| 1438 | |
| 1439 | set_cgtd: |
Ioana Radulescu | 2c8d1c8 | 2020-05-31 00:08:11 +0300 | [diff] [blame] | 1440 | /* Congestion group taildrop: threshold is in frames, per group |
| 1441 | * of FQs belonging to the same traffic class |
Ioana Ciornei | 07beb16 | 2020-05-31 00:08:14 +0300 | [diff] [blame] | 1442 | * Enabled if general Tx pause disabled or if PFCs are enabled |
| 1443 | * (congestion group threhsold for PFC generation is lower than the |
| 1444 | * CG taildrop threshold, so it won't interfere with it; we also |
| 1445 | * want frames in non-PFC enabled traffic classes to be kept in check) |
Ioana Radulescu | 2c8d1c8 | 2020-05-31 00:08:11 +0300 | [diff] [blame] | 1446 | */ |
Ioana Ciornei | 07beb16 | 2020-05-31 00:08:14 +0300 | [diff] [blame] | 1447 | td.enable = !tx_pause || (tx_pause && pfc); |
| 1448 | if (priv->rx_cgtd_enabled == td.enable) |
| 1449 | return; |
| 1450 | |
Ioana Radulescu | 2c8d1c8 | 2020-05-31 00:08:11 +0300 | [diff] [blame] | 1451 | td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv); |
| 1452 | td.units = DPNI_CONGESTION_UNIT_FRAMES; |
| 1453 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
| 1454 | err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, |
| 1455 | DPNI_CP_GROUP, DPNI_QUEUE_RX, |
| 1456 | i, 0, &td); |
| 1457 | if (err) { |
| 1458 | netdev_err(priv->net_dev, |
| 1459 | "dpni_set_taildrop(CG) failed\n"); |
| 1460 | return; |
| 1461 | } |
| 1462 | } |
| 1463 | |
Ioana Ciornei | 07beb16 | 2020-05-31 00:08:14 +0300 | [diff] [blame] | 1464 | priv->rx_cgtd_enabled = td.enable; |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1465 | } |
| 1466 | |
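/* Query the current link state from firmware and, unless the link is managed
 * through a DPMAC/phylink instance, propagate it to the net device (carrier
 * state and Tx queues).
 */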
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1467 | static int link_state_update(struct dpaa2_eth_priv *priv) |
| 1468 | { |
Ioana Ciornei | 85b7a34 | 2018-10-12 16:27:33 +0000 | [diff] [blame] | 1469 | struct dpni_link_state state = {0}; |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1470 | bool tx_pause; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1471 | int err; |
| 1472 | |
| 1473 | err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); |
| 1474 | if (unlikely(err)) { |
| 1475 | netdev_err(priv->net_dev, |
| 1476 | "dpni_get_link_state() failed\n"); |
| 1477 | return err; |
| 1478 | } |
| 1479 | |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1480 | /* If Tx pause frame settings have changed, we need to update |
| 1481 | * Rx FQ taildrop configuration as well. We configure taildrop |
| 1482 | * only when pause frame generation is disabled. |
| 1483 | */ |
Ioana Radulescu | ad054f2 | 2020-05-31 00:08:10 +0300 | [diff] [blame] | 1484 | tx_pause = dpaa2_eth_tx_pause_enabled(state.options); |
Ioana Ciornei | 07beb16 | 2020-05-31 00:08:14 +0300 | [diff] [blame] | 1485 | dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled); |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 1486 | |
Ioana Ciornei | 7194792 | 2019-10-31 01:18:31 +0200 | [diff] [blame] | 1487 | /* When we manage the MAC/PHY using phylink there is no need |
| 1488 | * to manually update the netif_carrier. |
| 1489 | */ |
| 1490 | if (priv->mac) |
| 1491 | goto out; |
| 1492 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1493 | /* Check link state; speed / duplex changes are not treated yet */ |
| 1494 | if (priv->link_state.up == state.up) |
Ioana Radulescu | cce62943 | 2019-08-28 17:08:14 +0300 | [diff] [blame] | 1495 | goto out; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1496 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1497 | if (state.up) { |
| 1498 | netif_carrier_on(priv->net_dev); |
| 1499 | netif_tx_start_all_queues(priv->net_dev); |
| 1500 | } else { |
| 1501 | netif_tx_stop_all_queues(priv->net_dev); |
| 1502 | netif_carrier_off(priv->net_dev); |
| 1503 | } |
| 1504 | |
Ioana Radulescu | 77160af | 2017-06-06 10:00:28 -0500 | [diff] [blame] | 1505 | netdev_info(priv->net_dev, "Link Event: state %s\n", |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1506 | state.up ? "up" : "down"); |
| 1507 | |
Ioana Radulescu | cce62943 | 2019-08-28 17:08:14 +0300 | [diff] [blame] | 1508 | out: |
| 1509 | priv->link_state = state; |
| 1510 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1511 | return 0; |
| 1512 | } |
| 1513 | |
| 1514 | static int dpaa2_eth_open(struct net_device *net_dev) |
| 1515 | { |
| 1516 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
| 1517 | int err; |
| 1518 | |
Ioana Radulescu | 05fa39c | 2017-06-06 10:00:37 -0500 | [diff] [blame] | 1519 | err = seed_pool(priv, priv->bpid); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1520 | if (err) { |
| 1521 | /* Not much to do; the buffer pool, though not filled up, |
| 1522 | * may still contain some buffers which would enable us |
| 1523 | * to limp on. |
| 1524 | */ |
| 1525 | netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", |
Ioana Radulescu | 05fa39c | 2017-06-06 10:00:37 -0500 | [diff] [blame] | 1526 | priv->dpbp_dev->obj_desc.id, priv->bpid); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1527 | } |
| 1528 | |
Ioana Ciornei | 7194792 | 2019-10-31 01:18:31 +0200 | [diff] [blame] | 1529 | if (!priv->mac) { |
| 1530 | /* We'll only start the txqs when the link is actually ready; |
| 1531 | * make sure we don't race against the link up notification, |
| 1532 | * which may come immediately after dpni_enable(); |
| 1533 | */ |
| 1534 | netif_tx_stop_all_queues(net_dev); |
| 1535 | |
| 1536 | /* Also, explicitly set carrier off, otherwise |
| 1537 | * netif_carrier_ok() will return true and cause 'ip link show' |
| 1538 | * to report the LOWER_UP flag, even though the link |
| 1539 | * notification wasn't even received. |
| 1540 | */ |
| 1541 | netif_carrier_off(net_dev); |
| 1542 | } |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1543 | enable_ch_napi(priv); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1544 | |
| 1545 | err = dpni_enable(priv->mc_io, 0, priv->mc_token); |
| 1546 | if (err < 0) { |
| 1547 | netdev_err(net_dev, "dpni_enable() failed\n"); |
| 1548 | goto enable_err; |
| 1549 | } |
| 1550 | |
Ioana Ciornei | 7194792 | 2019-10-31 01:18:31 +0200 | [diff] [blame] | 1551 | if (!priv->mac) { |
| 1552 | /* If the DPMAC object has already processed the link up |
| 1553 | * interrupt, we have to learn the link state ourselves. |
| 1554 | */ |
| 1555 | err = link_state_update(priv); |
| 1556 | if (err < 0) { |
| 1557 | netdev_err(net_dev, "Can't update link state\n"); |
| 1558 | goto link_state_err; |
| 1559 | } |
| 1560 | } else { |
| 1561 | phylink_start(priv->mac->phylink); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1562 | } |
| 1563 | |
| 1564 | return 0; |
| 1565 | |
| 1566 | link_state_err: |
| 1567 | enable_err: |
| 1568 | disable_ch_napi(priv); |
| 1569 | drain_pool(priv); |
| 1570 | return err; |
| 1571 | } |
| 1572 | |
Ioana Ciocoi Radulescu | 68d7431 | 2019-01-16 16:51:44 +0000 | [diff] [blame] | 1573 | /* Total number of in-flight frames on ingress queues */ |
| 1574 | static u32 ingress_fq_count(struct dpaa2_eth_priv *priv) |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1575 | { |
Ioana Ciocoi Radulescu | 68d7431 | 2019-01-16 16:51:44 +0000 | [diff] [blame] | 1576 | struct dpaa2_eth_fq *fq; |
| 1577 | u32 fcnt = 0, bcnt = 0, total = 0; |
| 1578 | int i, err; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1579 | |
Ioana Ciocoi Radulescu | 68d7431 | 2019-01-16 16:51:44 +0000 | [diff] [blame] | 1580 | for (i = 0; i < priv->num_fqs; i++) { |
| 1581 | fq = &priv->fq[i]; |
| 1582 | err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); |
| 1583 | if (err) { |
| 1584 | netdev_warn(priv->net_dev, "query_fq_count failed"); |
| 1585 | break; |
| 1586 | } |
| 1587 | total += fcnt; |
| 1588 | } |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1589 | |
| 1590 | return total; |
| 1591 | } |
| 1592 | |
Ioana Radulescu | 52b6a4f | 2019-09-02 13:23:19 +0300 | [diff] [blame] | 1593 | static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1594 | { |
Ioana Ciocoi Radulescu | 68d7431 | 2019-01-16 16:51:44 +0000 | [diff] [blame] | 1595 | int retries = 10; |
| 1596 | u32 pending; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1597 | |
Ioana Ciocoi Radulescu | 68d7431 | 2019-01-16 16:51:44 +0000 | [diff] [blame] | 1598 | do { |
| 1599 | pending = ingress_fq_count(priv); |
| 1600 | if (pending) |
| 1601 | msleep(100); |
| 1602 | } while (pending && --retries); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1603 | } |
| 1604 | |
Ioana Radulescu | 52b6a4f | 2019-09-02 13:23:19 +0300 | [diff] [blame] | 1605 | #define DPNI_TX_PENDING_VER_MAJOR 7 |
| 1606 | #define DPNI_TX_PENDING_VER_MINOR 13 |
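/* Wait until hardware reports no more Tx frames in flight. Only DPNI
 * versions that expose the tx_pending_frames counter can be polled; on
 * older firmware we just sleep for a grace period.
 */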
| 1607 | static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) |
| 1608 | { |
| 1609 | union dpni_statistics stats; |
| 1610 | int retries = 10; |
| 1611 | int err; |
| 1612 | |
| 1613 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR, |
| 1614 | DPNI_TX_PENDING_VER_MINOR) < 0) |
| 1615 | goto out; |
| 1616 | |
| 1617 | do { |
| 1618 | err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6, |
| 1619 | &stats); |
| 1620 | if (err) |
| 1621 | goto out; |
| 1622 | if (stats.page_6.tx_pending_frames == 0) |
| 1623 | return; |
| 1624 | } while (--retries); |
| 1625 | |
| 1626 | out: |
| 1627 | msleep(500); |
| 1628 | } |
| 1629 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1630 | static int dpaa2_eth_stop(struct net_device *net_dev) |
| 1631 | { |
| 1632 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
Ioana Ciornei | 85b7a34 | 2018-10-12 16:27:33 +0000 | [diff] [blame] | 1633 | int dpni_enabled = 0; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1634 | int retries = 10; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1635 | |
Ioana Ciornei | 7194792 | 2019-10-31 01:18:31 +0200 | [diff] [blame] | 1636 | if (!priv->mac) { |
| 1637 | netif_tx_stop_all_queues(net_dev); |
| 1638 | netif_carrier_off(net_dev); |
| 1639 | } else { |
| 1640 | phylink_stop(priv->mac->phylink); |
| 1641 | } |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1642 | |
Ioana Ciocoi Radulescu | 68d7431 | 2019-01-16 16:51:44 +0000 | [diff] [blame] | 1643 | /* On dpni_disable(), the MC firmware will: |
| 1644 | * - stop MAC Rx and wait for all Rx frames to be enqueued to software |
| 1645 | * - cut off WRIOP dequeues from egress FQs and wait until transmission |
| 1646 | * of all in flight Tx frames is finished (and corresponding Tx conf |
| 1647 | * frames are enqueued back to software) |
| 1648 | * |
| 1649 | * Before calling dpni_disable(), we wait for all Tx frames to arrive |
| 1650 | * on WRIOP. After it finishes, wait until all remaining frames on Rx |
| 1651 | * and Tx conf queues are consumed on NAPI poll. |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1652 | */ |
Ioana Radulescu | 52b6a4f | 2019-09-02 13:23:19 +0300 | [diff] [blame] | 1653 | wait_for_egress_fq_empty(priv); |
Ioana Ciocoi Radulescu | 68d7431 | 2019-01-16 16:51:44 +0000 | [diff] [blame] | 1654 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1655 | do { |
| 1656 | dpni_disable(priv->mc_io, 0, priv->mc_token); |
| 1657 | dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); |
| 1658 | if (dpni_enabled) |
| 1659 | /* Allow the hardware some slack */ |
| 1660 | msleep(100); |
| 1661 | } while (dpni_enabled && --retries); |
| 1662 | if (!retries) { |
| 1663 | netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); |
| 1664 | /* Must go on and disable NAPI nonetheless, so we don't crash at |
| 1665 | * the next "ifconfig up" |
| 1666 | */ |
| 1667 | } |
| 1668 | |
Ioana Radulescu | 52b6a4f | 2019-09-02 13:23:19 +0300 | [diff] [blame] | 1669 | wait_for_ingress_fq_empty(priv); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1670 | disable_ch_napi(priv); |
| 1671 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1672 | /* Empty the buffer pool */ |
| 1673 | drain_pool(priv); |
| 1674 | |
Ioana Ciornei | d70446e | 2020-06-29 21:47:11 +0300 | [diff] [blame] | 1675 | /* Empty the Scatter-Gather Buffer cache */ |
| 1676 | dpaa2_eth_sgt_cache_drain(priv); |
| 1677 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1678 | return 0; |
| 1679 | } |
| 1680 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1681 | static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) |
| 1682 | { |
| 1683 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
| 1684 | struct device *dev = net_dev->dev.parent; |
| 1685 | int err; |
| 1686 | |
| 1687 | err = eth_mac_addr(net_dev, addr); |
| 1688 | if (err < 0) { |
| 1689 | dev_err(dev, "eth_mac_addr() failed (%d)\n", err); |
| 1690 | return err; |
| 1691 | } |
| 1692 | |
| 1693 | err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, |
| 1694 | net_dev->dev_addr); |
| 1695 | if (err) { |
| 1696 | dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); |
| 1697 | return err; |
| 1698 | } |
| 1699 | |
| 1700 | return 0; |
| 1701 | } |
| 1702 | |
| 1703 | /* Fill in counters maintained by the GPP driver. These may be different from |
| 1704 | * the hardware counters obtained by ethtool. |
| 1705 | */ |
Ioana Radulescu | acbff8e | 2017-06-06 10:00:24 -0500 | [diff] [blame] | 1706 | static void dpaa2_eth_get_stats(struct net_device *net_dev, |
| 1707 | struct rtnl_link_stats64 *stats) |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1708 | { |
| 1709 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
| 1710 | struct rtnl_link_stats64 *percpu_stats; |
| 1711 | u64 *cpustats; |
| 1712 | u64 *netstats = (u64 *)stats; |
| 1713 | int i, j; |
| 1714 | int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); |
| 1715 | |
| 1716 | for_each_possible_cpu(i) { |
| 1717 | percpu_stats = per_cpu_ptr(priv->percpu_stats, i); |
| 1718 | cpustats = (u64 *)percpu_stats; |
| 1719 | for (j = 0; j < num; j++) |
| 1720 | netstats[j] += cpustats[j]; |
| 1721 | } |
| 1722 | } |
| 1723 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 1724 | /* Copy mac unicast addresses from @net_dev to @priv. |
| 1725 | * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. |
| 1726 | */ |
| 1727 | static void add_uc_hw_addr(const struct net_device *net_dev, |
| 1728 | struct dpaa2_eth_priv *priv) |
| 1729 | { |
| 1730 | struct netdev_hw_addr *ha; |
| 1731 | int err; |
| 1732 | |
| 1733 | netdev_for_each_uc_addr(ha, net_dev) { |
| 1734 | err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, |
| 1735 | ha->addr); |
| 1736 | if (err) |
| 1737 | netdev_warn(priv->net_dev, |
| 1738 | "Could not add ucast MAC %pM to the filtering table (err %d)\n", |
| 1739 | ha->addr, err); |
| 1740 | } |
| 1741 | } |
| 1742 | |
| 1743 | /* Copy mac multicast addresses from @net_dev to @priv |
| 1744 | * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. |
| 1745 | */ |
| 1746 | static void add_mc_hw_addr(const struct net_device *net_dev, |
| 1747 | struct dpaa2_eth_priv *priv) |
| 1748 | { |
| 1749 | struct netdev_hw_addr *ha; |
| 1750 | int err; |
| 1751 | |
| 1752 | netdev_for_each_mc_addr(ha, net_dev) { |
| 1753 | err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, |
| 1754 | ha->addr); |
| 1755 | if (err) |
| 1756 | netdev_warn(priv->net_dev, |
| 1757 | "Could not add mcast MAC %pM to the filtering table (err %d)\n", |
| 1758 | ha->addr, err); |
| 1759 | } |
| 1760 | } |
| 1761 | |
| 1762 | static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) |
| 1763 | { |
| 1764 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
| 1765 | int uc_count = netdev_uc_count(net_dev); |
| 1766 | int mc_count = netdev_mc_count(net_dev); |
| 1767 | u8 max_mac = priv->dpni_attrs.mac_filter_entries; |
| 1768 | u32 options = priv->dpni_attrs.options; |
| 1769 | u16 mc_token = priv->mc_token; |
| 1770 | struct fsl_mc_io *mc_io = priv->mc_io; |
| 1771 | int err; |
| 1772 | |
| 1773 | /* Basic sanity checks; these probably indicate a misconfiguration */ |
| 1774 | if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) |
| 1775 | netdev_info(net_dev, |
| 1776 | "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", |
| 1777 | max_mac); |
| 1778 | |
| 1779 | /* Force promiscuous if the uc or mc counts exceed our capabilities. */ |
| 1780 | if (uc_count > max_mac) { |
| 1781 | netdev_info(net_dev, |
| 1782 | "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", |
| 1783 | uc_count, max_mac); |
| 1784 | goto force_promisc; |
| 1785 | } |
| 1786 | if (mc_count + uc_count > max_mac) { |
| 1787 | netdev_info(net_dev, |
| 1788 | "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", |
| 1789 | uc_count + mc_count, max_mac); |
| 1790 | goto force_mc_promisc; |
| 1791 | } |
| 1792 | |
| 1793 | /* Adjust promisc settings due to flag combinations */ |
| 1794 | if (net_dev->flags & IFF_PROMISC) |
| 1795 | goto force_promisc; |
| 1796 | if (net_dev->flags & IFF_ALLMULTI) { |
| 1797 | /* First, rebuild unicast filtering table. This should be done |
| 1798 | * in promisc mode, in order to avoid frame loss while we |
| 1799 | * progressively add entries to the table. |
| 1800 | * We don't know whether we had been in promisc already, and |
| 1801 | * making an MC call to find out is expensive; so set uc promisc |
| 1802 | * nonetheless. |
| 1803 | */ |
| 1804 | err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); |
| 1805 | if (err) |
| 1806 | netdev_warn(net_dev, "Can't set uc promisc\n"); |
| 1807 | |
| 1808 | /* Actual uc table reconstruction. */ |
| 1809 | err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); |
| 1810 | if (err) |
| 1811 | netdev_warn(net_dev, "Can't clear uc filters\n"); |
| 1812 | add_uc_hw_addr(net_dev, priv); |
| 1813 | |
| 1814 | /* Finally, clear uc promisc and set mc promisc as requested. */ |
| 1815 | err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); |
| 1816 | if (err) |
| 1817 | netdev_warn(net_dev, "Can't clear uc promisc\n"); |
| 1818 | goto force_mc_promisc; |
| 1819 | } |
| 1820 | |
| 1821 | /* Neither unicast nor multicast promisc will be on... eventually. |
| 1822 | * For now, rebuild mac filtering tables while forcing both of them on. |
| 1823 | */ |
| 1824 | err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); |
| 1825 | if (err) |
| 1826 | netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); |
| 1827 | err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); |
| 1828 | if (err) |
| 1829 | netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); |
| 1830 | |
| 1831 | /* Actual mac filtering tables reconstruction */ |
| 1832 | err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); |
| 1833 | if (err) |
| 1834 | netdev_warn(net_dev, "Can't clear mac filters\n"); |
| 1835 | add_mc_hw_addr(net_dev, priv); |
| 1836 | add_uc_hw_addr(net_dev, priv); |
| 1837 | |
| 1838 | /* Now we can clear both ucast and mcast promisc, without risking |
| 1839 | * to drop legitimate frames anymore. |
| 1840 | */ |
| 1841 | err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); |
| 1842 | if (err) |
| 1843 | netdev_warn(net_dev, "Can't clear ucast promisc\n"); |
| 1844 | err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); |
| 1845 | if (err) |
| 1846 | netdev_warn(net_dev, "Can't clear mcast promisc\n"); |
| 1847 | |
| 1848 | return; |
| 1849 | |
| 1850 | force_promisc: |
| 1851 | err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); |
| 1852 | if (err) |
| 1853 | netdev_warn(net_dev, "Can't set ucast promisc\n"); |
| 1854 | force_mc_promisc: |
| 1855 | err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); |
| 1856 | if (err) |
| 1857 | netdev_warn(net_dev, "Can't set mcast promisc\n"); |
| 1858 | } |
| 1859 | |
| 1860 | static int dpaa2_eth_set_features(struct net_device *net_dev, |
| 1861 | netdev_features_t features) |
| 1862 | { |
| 1863 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
| 1864 | netdev_features_t changed = features ^ net_dev->features; |
| 1865 | bool enable; |
| 1866 | int err; |
| 1867 | |
| 1868 | if (changed & NETIF_F_RXCSUM) { |
| 1869 | enable = !!(features & NETIF_F_RXCSUM); |
| 1870 | err = set_rx_csum(priv, enable); |
| 1871 | if (err) |
| 1872 | return err; |
| 1873 | } |
| 1874 | |
| 1875 | if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { |
| 1876 | enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); |
| 1877 | err = set_tx_csum(priv, enable); |
| 1878 | if (err) |
| 1879 | return err; |
| 1880 | } |
| 1881 | |
| 1882 | return 0; |
| 1883 | } |
| 1884 | |
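/* SIOCSHWTSTAMP handler: switch Tx timestamping on or off and enable Rx
 * timestamping for all ingress frames (per-packet-type filtering is not
 * supported, so anything other than HWTSTAMP_FILTER_NONE enables it
 * globally).
 */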
Ioana Radulescu | 859f998 | 2018-04-26 18:23:47 +0800 | [diff] [blame] | 1885 | static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
| 1886 | { |
| 1887 | struct dpaa2_eth_priv *priv = netdev_priv(dev); |
| 1888 | struct hwtstamp_config config; |
| 1889 | |
| 1890 | if (copy_from_user(&config, rq->ifr_data, sizeof(config))) |
| 1891 | return -EFAULT; |
| 1892 | |
| 1893 | switch (config.tx_type) { |
| 1894 | case HWTSTAMP_TX_OFF: |
| 1895 | priv->tx_tstamp = false; |
| 1896 | break; |
| 1897 | case HWTSTAMP_TX_ON: |
| 1898 | priv->tx_tstamp = true; |
| 1899 | break; |
| 1900 | default: |
| 1901 | return -ERANGE; |
| 1902 | } |
| 1903 | |
| 1904 | if (config.rx_filter == HWTSTAMP_FILTER_NONE) { |
| 1905 | priv->rx_tstamp = false; |
| 1906 | } else { |
| 1907 | priv->rx_tstamp = true; |
| 1908 | /* TS is set for all frame types, not only those requested */ |
| 1909 | config.rx_filter = HWTSTAMP_FILTER_ALL; |
| 1910 | } |
| 1911 | |
| 1912 | return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? |
| 1913 | -EFAULT : 0; |
| 1914 | } |
| 1915 | |
| 1916 | static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
| 1917 | { |
Russell King | 4a84182 | 2020-02-27 12:00:21 +0000 | [diff] [blame] | 1918 | struct dpaa2_eth_priv *priv = netdev_priv(dev); |
| 1919 | |
Ioana Radulescu | 859f998 | 2018-04-26 18:23:47 +0800 | [diff] [blame] | 1920 | if (cmd == SIOCSHWTSTAMP) |
| 1921 | return dpaa2_eth_ts_ioctl(dev, rq, cmd); |
| 1922 | |
Russell King | 4a84182 | 2020-02-27 12:00:21 +0000 | [diff] [blame] | 1923 | if (priv->mac) |
| 1924 | return phylink_mii_ioctl(priv->mac->phylink, rq, cmd); |
| 1925 | |
| 1926 | return -EOPNOTSUPP; |
Ioana Radulescu | 859f998 | 2018-04-26 18:23:47 +0800 | [diff] [blame] | 1927 | } |
| 1928 | |
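/* XDP needs linear (single buffer) Rx frames, so the MTU is only valid if a
 * maximum sized frame still fits in one Rx buffer after subtracting the
 * hardware annotation area, the Rx headroom and the extra XDP headroom.
 */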
Ioana Ciocoi Radulescu | 7e273a8 | 2018-11-26 16:27:29 +0000 | [diff] [blame] | 1929 | static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) |
| 1930 | { |
| 1931 | int mfl, linear_mfl; |
| 1932 | |
| 1933 | mfl = DPAA2_ETH_L2_MAX_FRM(mtu); |
Ioana Ciornei | efa6a7d | 2020-05-15 15:30:22 +0300 | [diff] [blame] | 1934 | linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - |
Ioana Ciocoi Radulescu | 7b1eea1 | 2018-11-26 16:27:30 +0000 | [diff] [blame] | 1935 | dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; |
Ioana Ciocoi Radulescu | 7e273a8 | 2018-11-26 16:27:29 +0000 | [diff] [blame] | 1936 | |
| 1937 | if (mfl > linear_mfl) { |
| 1938 | netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n", |
| 1939 | linear_mfl - VLAN_ETH_HLEN); |
| 1940 | return false; |
| 1941 | } |
| 1942 | |
| 1943 | return true; |
| 1944 | } |
| 1945 | |
| 1946 | static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) |
| 1947 | { |
| 1948 | int mfl, err; |
| 1949 | |
| 1950 | /* We enforce a maximum Rx frame length based on MTU only if we have |
| 1951 | * an XDP program attached (in order to avoid Rx S/G frames). |
| 1952 | * Otherwise, we accept all incoming frames as long as they are not |
| 1953 | * larger than maximum size supported in hardware |
| 1954 | */ |
| 1955 | if (has_xdp) |
| 1956 | mfl = DPAA2_ETH_L2_MAX_FRM(mtu); |
| 1957 | else |
| 1958 | mfl = DPAA2_ETH_MFL; |
| 1959 | |
| 1960 | err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl); |
| 1961 | if (err) { |
| 1962 | netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n"); |
| 1963 | return err; |
| 1964 | } |
| 1965 | |
| 1966 | return 0; |
| 1967 | } |
| 1968 | |
| 1969 | static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu) |
| 1970 | { |
| 1971 | struct dpaa2_eth_priv *priv = netdev_priv(dev); |
| 1972 | int err; |
| 1973 | |
| 1974 | if (!priv->xdp_prog) |
| 1975 | goto out; |
| 1976 | |
| 1977 | if (!xdp_mtu_valid(priv, new_mtu)) |
| 1978 | return -EINVAL; |
| 1979 | |
| 1980 | err = set_rx_mfl(priv, new_mtu, true); |
| 1981 | if (err) |
| 1982 | return err; |
| 1983 | |
| 1984 | out: |
| 1985 | dev->mtu = new_mtu; |
| 1986 | return 0; |
| 1987 | } |
| 1988 | |
Ioana Ciocoi Radulescu | 7b1eea1 | 2018-11-26 16:27:30 +0000 | [diff] [blame] | 1989 | static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) |
| 1990 | { |
| 1991 | struct dpni_buffer_layout buf_layout = {0}; |
| 1992 | int err; |
| 1993 | |
| 1994 | err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token, |
| 1995 | DPNI_QUEUE_RX, &buf_layout); |
| 1996 | if (err) { |
| 1997 | netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n"); |
| 1998 | return err; |
| 1999 | } |
| 2000 | |
| 2001 | /* Reserve extra headroom for XDP header size changes */ |
| 2002 | buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) + |
| 2003 | (has_xdp ? XDP_PACKET_HEADROOM : 0); |
| 2004 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; |
| 2005 | err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
| 2006 | DPNI_QUEUE_RX, &buf_layout); |
| 2007 | if (err) { |
| 2008 | netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n"); |
| 2009 | return err; |
| 2010 | } |
| 2011 | |
| 2012 | return 0; |
| 2013 | } |
| 2014 | |
Ioana Ciocoi Radulescu | 7e273a8 | 2018-11-26 16:27:29 +0000 | [diff] [blame] | 2015 | static int setup_xdp(struct net_device *dev, struct bpf_prog *prog) |
| 2016 | { |
| 2017 | struct dpaa2_eth_priv *priv = netdev_priv(dev); |
| 2018 | struct dpaa2_eth_channel *ch; |
| 2019 | struct bpf_prog *old; |
| 2020 | bool up, need_update; |
| 2021 | int i, err; |
| 2022 | |
| 2023 | if (prog && !xdp_mtu_valid(priv, dev->mtu)) |
| 2024 | return -EINVAL; |
| 2025 | |
Andrii Nakryiko | 85192db | 2019-11-17 09:28:03 -0800 | [diff] [blame] | 2026 | if (prog) |
| 2027 | bpf_prog_add(prog, priv->num_channels); |
Ioana Ciocoi Radulescu | 7e273a8 | 2018-11-26 16:27:29 +0000 | [diff] [blame] | 2028 | |
| 2029 | up = netif_running(dev); |
| 2030 | need_update = (!!priv->xdp_prog != !!prog); |
| 2031 | |
| 2032 | if (up) |
| 2033 | dpaa2_eth_stop(dev); |
| 2034 | |
Ioana Ciocoi Radulescu | 7b1eea1 | 2018-11-26 16:27:30 +0000 | [diff] [blame] | 2035 | /* While in xdp mode, enforce a maximum Rx frame size based on MTU. |
| 2036 | * Also, when switching between xdp/non-xdp modes we need to reconfigure |
| 2037 | * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop, |
| 2038 | * so we are sure no old format buffers will be used from now on. |
| 2039 | */ |
Ioana Ciocoi Radulescu | 7e273a8 | 2018-11-26 16:27:29 +0000 | [diff] [blame] | 2040 | if (need_update) { |
| 2041 | err = set_rx_mfl(priv, dev->mtu, !!prog); |
| 2042 | if (err) |
| 2043 | goto out_err; |
Ioana Ciocoi Radulescu | 7b1eea1 | 2018-11-26 16:27:30 +0000 | [diff] [blame] | 2044 | err = update_rx_buffer_headroom(priv, !!prog); |
| 2045 | if (err) |
| 2046 | goto out_err; |
Ioana Ciocoi Radulescu | 7e273a8 | 2018-11-26 16:27:29 +0000 | [diff] [blame] | 2047 | } |
| 2048 | |
| 2049 | old = xchg(&priv->xdp_prog, prog); |
| 2050 | if (old) |
| 2051 | bpf_prog_put(old); |
| 2052 | |
| 2053 | for (i = 0; i < priv->num_channels; i++) { |
| 2054 | ch = priv->channel[i]; |
| 2055 | old = xchg(&ch->xdp.prog, prog); |
| 2056 | if (old) |
| 2057 | bpf_prog_put(old); |
| 2058 | } |
| 2059 | |
| 2060 | if (up) { |
| 2061 | err = dpaa2_eth_open(dev); |
| 2062 | if (err) |
| 2063 | return err; |
| 2064 | } |
| 2065 | |
| 2066 | return 0; |
| 2067 | |
| 2068 | out_err: |
| 2069 | if (prog) |
| 2070 | bpf_prog_sub(prog, priv->num_channels); |
| 2071 | if (up) |
| 2072 | dpaa2_eth_open(dev); |
| 2073 | |
| 2074 | return err; |
| 2075 | } |
| 2076 | |
| 2077 | static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
| 2078 | { |
| 2079 | struct dpaa2_eth_priv *priv = netdev_priv(dev); |
| 2080 | |
| 2081 | switch (xdp->command) { |
| 2082 | case XDP_SETUP_PROG: |
| 2083 | return setup_xdp(dev, xdp->prog); |
| 2084 | case XDP_QUERY_PROG: |
| 2085 | xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0; |
| 2086 | break; |
| 2087 | default: |
| 2088 | return -EINVAL; |
| 2089 | } |
| 2090 | |
| 2091 | return 0; |
| 2092 | } |
| 2093 | |
Ioana Ciornei | 6aa40b9 | 2020-04-22 15:05:12 +0300 | [diff] [blame] | 2094 | static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev, |
| 2095 | struct xdp_frame *xdpf, |
| 2096 | struct dpaa2_fd *fd) |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2097 | { |
| 2098 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
| 2099 | struct device *dev = net_dev->dev.parent; |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2100 | unsigned int needed_headroom; |
| 2101 | struct dpaa2_eth_swa *swa; |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2102 | void *buffer_start, *aligned_start; |
| 2103 | dma_addr_t addr; |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2104 | |
| 2105 | /* We require a minimum headroom to be able to transmit the frame. |
| 2106 | * Otherwise return an error and let the original net_device handle it |
| 2107 | */ |
| 2108 | needed_headroom = dpaa2_eth_needed_headroom(priv, NULL); |
| 2109 | if (xdpf->headroom < needed_headroom) |
| 2110 | return -EINVAL; |
| 2111 | |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2112 | /* Setup the FD fields */ |
Ioana Ciornei | 6aa40b9 | 2020-04-22 15:05:12 +0300 | [diff] [blame] | 2113 | memset(fd, 0, sizeof(*fd)); |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2114 | |
| 2115 | /* Align FD address, if possible */ |
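/* PTR_ALIGN() rounds up, so subtracting the alignment first effectively
 * rounds buffer_start down to the previous DPAA2_ETH_TX_BUF_ALIGN boundary;
 * the aligned address is used only if it still falls within the xdp_frame's
 * headroom.
 */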
| 2116 | buffer_start = xdpf->data - needed_headroom; |
| 2117 | aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, |
| 2118 | DPAA2_ETH_TX_BUF_ALIGN); |
| 2119 | if (aligned_start >= xdpf->data - xdpf->headroom) |
| 2120 | buffer_start = aligned_start; |
| 2121 | |
| 2122 | swa = (struct dpaa2_eth_swa *)buffer_start; |
| 2123 | /* Fill in the software annotation fields needed at Tx confirmation time */ |
| 2124 | swa->type = DPAA2_ETH_SWA_XDP; |
| 2125 | swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start; |
| 2126 | swa->xdp.xdpf = xdpf; |
| 2127 | |
| 2128 | addr = dma_map_single(dev, buffer_start, |
| 2129 | swa->xdp.dma_size, |
| 2130 | DMA_BIDIRECTIONAL); |
Ioana Ciornei | 6aa40b9 | 2020-04-22 15:05:12 +0300 | [diff] [blame] | 2131 | if (unlikely(dma_mapping_error(dev, addr))) |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2132 | return -ENOMEM; |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2133 | |
Ioana Ciornei | 6aa40b9 | 2020-04-22 15:05:12 +0300 | [diff] [blame] | 2134 | dpaa2_fd_set_addr(fd, addr); |
| 2135 | dpaa2_fd_set_offset(fd, xdpf->data - buffer_start); |
| 2136 | dpaa2_fd_set_len(fd, xdpf->len); |
| 2137 | dpaa2_fd_set_format(fd, dpaa2_fd_single); |
| 2138 | dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2139 | |
| 2140 | return 0; |
| 2141 | } |
| 2142 | |
| 2143 | static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, |
| 2144 | struct xdp_frame **frames, u32 flags) |
| 2145 | { |
Ioana Ciornei | 6aa40b9 | 2020-04-22 15:05:12 +0300 | [diff] [blame] | 2146 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
Ioana Ciornei | 38c440b | 2020-05-06 20:47:17 +0300 | [diff] [blame] | 2147 | struct dpaa2_eth_xdp_fds *xdp_redirect_fds; |
Ioana Ciornei | 6aa40b9 | 2020-04-22 15:05:12 +0300 | [diff] [blame] | 2148 | struct rtnl_link_stats64 *percpu_stats; |
| 2149 | struct dpaa2_eth_fq *fq; |
Ioana Ciornei | 8665d97 | 2020-04-22 15:05:13 +0300 | [diff] [blame] | 2150 | struct dpaa2_fd *fds; |
Ioana Ciornei | 38c440b | 2020-05-06 20:47:17 +0300 | [diff] [blame] | 2151 | int enqueued, i, err; |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2152 | |
| 2153 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
| 2154 | return -EINVAL; |
| 2155 | |
| 2156 | if (!netif_running(net_dev)) |
| 2157 | return -ENETDOWN; |
| 2158 | |
Ioana Ciornei | 8665d97 | 2020-04-22 15:05:13 +0300 | [diff] [blame] | 2159 | fq = &priv->fq[smp_processor_id()]; |
Ioana Ciornei | 38c440b | 2020-05-06 20:47:17 +0300 | [diff] [blame] | 2160 | xdp_redirect_fds = &fq->xdp_redirect_fds; |
| 2161 | fds = xdp_redirect_fds->fds; |
Ioana Ciornei | 8665d97 | 2020-04-22 15:05:13 +0300 | [diff] [blame] | 2162 | |
Ioana Ciornei | 6aa40b9 | 2020-04-22 15:05:12 +0300 | [diff] [blame] | 2163 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
Ioana Ciornei | 6aa40b9 | 2020-04-22 15:05:12 +0300 | [diff] [blame] | 2164 | |
Ioana Ciornei | 8665d97 | 2020-04-22 15:05:13 +0300 | [diff] [blame] |  2165 |  /* Create a FD for each xdp_frame in the received list */ |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2166 | for (i = 0; i < n; i++) { |
Ioana Ciornei | 8665d97 | 2020-04-22 15:05:13 +0300 | [diff] [blame] | 2167 | err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]); |
| 2168 | if (err) |
| 2169 | break; |
| 2170 | } |
Ioana Ciornei | 38c440b | 2020-05-06 20:47:17 +0300 | [diff] [blame] | 2171 | xdp_redirect_fds->num = i; |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2172 | |
Ioana Ciornei | 38c440b | 2020-05-06 20:47:17 +0300 | [diff] [blame] | 2173 | /* enqueue all the frame descriptors */ |
| 2174 | enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds); |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2175 | |
Ioana Ciornei | 8665d97 | 2020-04-22 15:05:13 +0300 | [diff] [blame] | 2176 | /* update statistics */ |
Ioana Ciornei | 38c440b | 2020-05-06 20:47:17 +0300 | [diff] [blame] | 2177 | percpu_stats->tx_packets += enqueued; |
| 2178 | for (i = 0; i < enqueued; i++) |
Ioana Ciornei | 8665d97 | 2020-04-22 15:05:13 +0300 | [diff] [blame] | 2179 | percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); |
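|       |  /* Frames that could not be enqueued are handed back to the XDP memory |
|       |   * allocator; only the enqueued ones count as transmitted |
|       |   */ |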
Ioana Ciornei | 38c440b | 2020-05-06 20:47:17 +0300 | [diff] [blame] | 2180 | for (i = enqueued; i < n; i++) |
Ioana Ciornei | 8665d97 | 2020-04-22 15:05:13 +0300 | [diff] [blame] | 2181 | xdp_return_frame_rx_napi(frames[i]); |
| 2182 | |
Ioana Ciornei | 38c440b | 2020-05-06 20:47:17 +0300 | [diff] [blame] | 2183 | return enqueued; |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2184 | } |
| 2185 | |
Ioana Radulescu | 06d5b17 | 2019-06-11 14:50:01 +0300 | [diff] [blame] | 2186 | static int update_xps(struct dpaa2_eth_priv *priv) |
| 2187 | { |
| 2188 | struct net_device *net_dev = priv->net_dev; |
| 2189 | struct cpumask xps_mask; |
| 2190 | struct dpaa2_eth_fq *fq; |
Ioana Radulescu | ab1e6de | 2019-06-11 14:50:03 +0300 | [diff] [blame] | 2191 | int i, num_queues, netdev_queues; |
Ioana Radulescu | 06d5b17 | 2019-06-11 14:50:01 +0300 | [diff] [blame] | 2192 | int err = 0; |
| 2193 | |
| 2194 | num_queues = dpaa2_eth_queue_count(priv); |
Ioana Radulescu | ab1e6de | 2019-06-11 14:50:03 +0300 | [diff] [blame] | 2195 | netdev_queues = (net_dev->num_tc ? : 1) * num_queues; |
Ioana Radulescu | 06d5b17 | 2019-06-11 14:50:01 +0300 | [diff] [blame] | 2196 | |
| 2197 | /* The first <num_queues> entries in priv->fq array are Tx/Tx conf |
| 2198 | * queues, so only process those |
| 2199 | */ |
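|       |  /* Netdev TX queue i inherits the affinity of hw queue (i % num_queues) */ |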
Ioana Radulescu | ab1e6de | 2019-06-11 14:50:03 +0300 | [diff] [blame] | 2200 | for (i = 0; i < netdev_queues; i++) { |
| 2201 | fq = &priv->fq[i % num_queues]; |
Ioana Radulescu | 06d5b17 | 2019-06-11 14:50:01 +0300 | [diff] [blame] | 2202 | |
| 2203 | cpumask_clear(&xps_mask); |
| 2204 | cpumask_set_cpu(fq->target_cpu, &xps_mask); |
| 2205 | |
| 2206 | err = netif_set_xps_queue(net_dev, &xps_mask, i); |
| 2207 | if (err) { |
| 2208 | netdev_warn_once(net_dev, "Error setting XPS queue\n"); |
| 2209 | break; |
| 2210 | } |
| 2211 | } |
| 2212 | |
| 2213 | return err; |
| 2214 | } |
| 2215 | |
Ioana Ciornei | e3ec13b | 2020-07-21 19:38:23 +0300 | [diff] [blame^] | 2216 | static int dpaa2_eth_setup_mqprio(struct net_device *net_dev, |
| 2217 | struct tc_mqprio_qopt *mqprio) |
Ioana Radulescu | ab1e6de | 2019-06-11 14:50:03 +0300 | [diff] [blame] | 2218 | { |
| 2219 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
Ioana Radulescu | ab1e6de | 2019-06-11 14:50:03 +0300 | [diff] [blame] | 2220 | u8 num_tc, num_queues; |
| 2221 | int i; |
| 2222 | |
Ioana Radulescu | ab1e6de | 2019-06-11 14:50:03 +0300 | [diff] [blame] | 2223 | mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; |
| 2224 | num_queues = dpaa2_eth_queue_count(priv); |
| 2225 | num_tc = mqprio->num_tc; |
| 2226 | |
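|       |  /* Nothing to do if the requested number of TCs is already configured */ |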
| 2227 | if (num_tc == net_dev->num_tc) |
| 2228 | return 0; |
| 2229 | |
| 2230 | if (num_tc > dpaa2_eth_tc_count(priv)) { |
| 2231 | netdev_err(net_dev, "Max %d traffic classes supported\n", |
| 2232 | dpaa2_eth_tc_count(priv)); |
Jesper Dangaard Brouer | b89c1e6 | 2020-04-23 16:57:50 +0200 | [diff] [blame] | 2233 | return -EOPNOTSUPP; |
Ioana Radulescu | ab1e6de | 2019-06-11 14:50:03 +0300 | [diff] [blame] | 2234 | } |
| 2235 | |
| 2236 | if (!num_tc) { |
| 2237 | netdev_reset_tc(net_dev); |
| 2238 | netif_set_real_num_tx_queues(net_dev, num_queues); |
| 2239 | goto out; |
| 2240 | } |
| 2241 | |
| 2242 | netdev_set_num_tc(net_dev, num_tc); |
| 2243 | netif_set_real_num_tx_queues(net_dev, num_tc * num_queues); |
| 2244 | |
| 2245 | for (i = 0; i < num_tc; i++) |
| 2246 | netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues); |
| 2247 | |
| 2248 | out: |
| 2249 | update_xps(priv); |
| 2250 | |
| 2251 | return 0; |
| 2252 | } |
| 2253 | |
Ioana Ciornei | e3ec13b | 2020-07-21 19:38:23 +0300 | [diff] [blame^] | 2254 | static int dpaa2_eth_setup_tc(struct net_device *net_dev, |
| 2255 | enum tc_setup_type type, void *type_data) |
| 2256 | { |
| 2257 | switch (type) { |
| 2258 | case TC_SETUP_QDISC_MQPRIO: |
| 2259 | return dpaa2_eth_setup_mqprio(net_dev, type_data); |
| 2260 | default: |
| 2261 | return -EOPNOTSUPP; |
| 2262 | } |
| 2263 | } |
| 2264 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2265 | static const struct net_device_ops dpaa2_eth_ops = { |
| 2266 | .ndo_open = dpaa2_eth_open, |
| 2267 | .ndo_start_xmit = dpaa2_eth_tx, |
| 2268 | .ndo_stop = dpaa2_eth_stop, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2269 | .ndo_set_mac_address = dpaa2_eth_set_addr, |
| 2270 | .ndo_get_stats64 = dpaa2_eth_get_stats, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2271 | .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, |
| 2272 | .ndo_set_features = dpaa2_eth_set_features, |
Ioana Radulescu | 859f998 | 2018-04-26 18:23:47 +0800 | [diff] [blame] | 2273 | .ndo_do_ioctl = dpaa2_eth_ioctl, |
Ioana Ciocoi Radulescu | 7e273a8 | 2018-11-26 16:27:29 +0000 | [diff] [blame] | 2274 | .ndo_change_mtu = dpaa2_eth_change_mtu, |
| 2275 | .ndo_bpf = dpaa2_eth_xdp, |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 2276 | .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, |
Ioana Radulescu | ab1e6de | 2019-06-11 14:50:03 +0300 | [diff] [blame] | 2277 | .ndo_setup_tc = dpaa2_eth_setup_tc, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2278 | }; |
| 2279 | |
| 2280 | static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) |
| 2281 | { |
| 2282 | struct dpaa2_eth_channel *ch; |
| 2283 | |
| 2284 | ch = container_of(ctx, struct dpaa2_eth_channel, nctx); |
Ioana Radulescu | 85047ab | 2017-04-28 04:50:31 -0500 | [diff] [blame] | 2285 | |
| 2286 | /* Update NAPI statistics */ |
| 2287 | ch->stats.cdan++; |
| 2288 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2289 | napi_schedule_irqoff(&ch->napi); |
| 2290 | } |
| 2291 | |
| 2292 | /* Allocate and configure a DPCON object */ |
| 2293 | static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) |
| 2294 | { |
| 2295 | struct fsl_mc_device *dpcon; |
| 2296 | struct device *dev = priv->net_dev->dev.parent; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2297 | int err; |
| 2298 | |
| 2299 | err = fsl_mc_object_allocate(to_fsl_mc_device(dev), |
| 2300 | FSL_MC_POOL_DPCON, &dpcon); |
| 2301 | if (err) { |
Ioana Ciornei | d7f5a9d | 2018-11-09 15:26:45 +0000 | [diff] [blame] | 2302 | if (err == -ENXIO) |
| 2303 | err = -EPROBE_DEFER; |
| 2304 | else |
| 2305 | dev_info(dev, "Not enough DPCONs, will go on as-is\n"); |
| 2306 | return ERR_PTR(err); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2307 | } |
| 2308 | |
| 2309 | err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); |
| 2310 | if (err) { |
| 2311 | dev_err(dev, "dpcon_open() failed\n"); |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 2312 | goto free; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2313 | } |
| 2314 | |
| 2315 | err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); |
| 2316 | if (err) { |
| 2317 | dev_err(dev, "dpcon_reset() failed\n"); |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 2318 | goto close; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2319 | } |
| 2320 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2321 | err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); |
| 2322 | if (err) { |
| 2323 | dev_err(dev, "dpcon_enable() failed\n"); |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 2324 | goto close; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2325 | } |
| 2326 | |
| 2327 | return dpcon; |
| 2328 | |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 2329 | close: |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2330 | dpcon_close(priv->mc_io, 0, dpcon->mc_handle); |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 2331 | free: |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2332 | fsl_mc_object_free(dpcon); |
| 2333 | |
| 2334 | return NULL; |
| 2335 | } |
| 2336 | |
| 2337 | static void free_dpcon(struct dpaa2_eth_priv *priv, |
| 2338 | struct fsl_mc_device *dpcon) |
| 2339 | { |
| 2340 | dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); |
| 2341 | dpcon_close(priv->mc_io, 0, dpcon->mc_handle); |
| 2342 | fsl_mc_object_free(dpcon); |
| 2343 | } |
| 2344 | |
| 2345 | static struct dpaa2_eth_channel * |
| 2346 | alloc_channel(struct dpaa2_eth_priv *priv) |
| 2347 | { |
| 2348 | struct dpaa2_eth_channel *channel; |
| 2349 | struct dpcon_attr attr; |
| 2350 | struct device *dev = priv->net_dev->dev.parent; |
| 2351 | int err; |
| 2352 | |
| 2353 | channel = kzalloc(sizeof(*channel), GFP_KERNEL); |
| 2354 | if (!channel) |
| 2355 | return NULL; |
| 2356 | |
| 2357 | channel->dpcon = setup_dpcon(priv); |
Ioana Ciornei | d7f5a9d | 2018-11-09 15:26:45 +0000 | [diff] [blame] | 2358 | if (IS_ERR_OR_NULL(channel->dpcon)) { |
Ioana Radulescu | bd8460f | 2019-05-24 18:15:16 +0300 | [diff] [blame] | 2359 | err = PTR_ERR_OR_ZERO(channel->dpcon); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2360 | goto err_setup; |
Ioana Ciornei | d7f5a9d | 2018-11-09 15:26:45 +0000 | [diff] [blame] | 2361 | } |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2362 | |
| 2363 | err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, |
| 2364 | &attr); |
| 2365 | if (err) { |
| 2366 | dev_err(dev, "dpcon_get_attributes() failed\n"); |
| 2367 | goto err_get_attr; |
| 2368 | } |
| 2369 | |
| 2370 | channel->dpcon_id = attr.id; |
| 2371 | channel->ch_id = attr.qbman_ch_id; |
| 2372 | channel->priv = priv; |
| 2373 | |
| 2374 | return channel; |
| 2375 | |
| 2376 | err_get_attr: |
| 2377 | free_dpcon(priv, channel->dpcon); |
| 2378 | err_setup: |
| 2379 | kfree(channel); |
Ioana Ciornei | d7f5a9d | 2018-11-09 15:26:45 +0000 | [diff] [blame] | 2380 | return ERR_PTR(err); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2381 | } |
| 2382 | |
| 2383 | static void free_channel(struct dpaa2_eth_priv *priv, |
| 2384 | struct dpaa2_eth_channel *channel) |
| 2385 | { |
| 2386 | free_dpcon(priv, channel->dpcon); |
| 2387 | kfree(channel); |
| 2388 | } |
| 2389 | |
| 2390 | /* DPIO setup: allocate and configure QBMan channels, setup core affinity |
| 2391 | * and register data availability notifications |
| 2392 | */ |
| 2393 | static int setup_dpio(struct dpaa2_eth_priv *priv) |
| 2394 | { |
| 2395 | struct dpaa2_io_notification_ctx *nctx; |
| 2396 | struct dpaa2_eth_channel *channel; |
| 2397 | struct dpcon_notification_cfg dpcon_notif_cfg; |
| 2398 | struct device *dev = priv->net_dev->dev.parent; |
| 2399 | int i, err; |
| 2400 | |
| 2401 | /* We want the ability to spread ingress traffic (RX, TX conf) to as |
| 2402 | * many cores as possible, so we need one channel for each core |
|  2403 |  * (unless there are fewer queues than cores, in which case the extra |
| 2404 | * channels would be wasted). |
| 2405 | * Allocate one channel per core and register it to the core's |
| 2406 | * affine DPIO. If not enough channels are available for all cores |
| 2407 | * or if some cores don't have an affine DPIO, there will be no |
| 2408 | * ingress frame processing on those cores. |
| 2409 | */ |
| 2410 | cpumask_clear(&priv->dpio_cpumask); |
| 2411 | for_each_online_cpu(i) { |
| 2412 | /* Try to allocate a channel */ |
| 2413 | channel = alloc_channel(priv); |
Ioana Ciornei | d7f5a9d | 2018-11-09 15:26:45 +0000 | [diff] [blame] | 2414 | if (IS_ERR_OR_NULL(channel)) { |
Ioana Radulescu | bd8460f | 2019-05-24 18:15:16 +0300 | [diff] [blame] | 2415 | err = PTR_ERR_OR_ZERO(channel); |
Ioana Ciornei | d7f5a9d | 2018-11-09 15:26:45 +0000 | [diff] [blame] | 2416 | if (err != -EPROBE_DEFER) |
| 2417 | dev_info(dev, |
| 2418 | "No affine channel for cpu %d and above\n", i); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2419 | goto err_alloc_ch; |
| 2420 | } |
| 2421 | |
| 2422 | priv->channel[priv->num_channels] = channel; |
| 2423 | |
| 2424 | nctx = &channel->nctx; |
| 2425 | nctx->is_cdan = 1; |
| 2426 | nctx->cb = cdan_cb; |
| 2427 | nctx->id = channel->ch_id; |
| 2428 | nctx->desired_cpu = i; |
| 2429 | |
| 2430 | /* Register the new context */ |
Ioana Radulescu | 7ec0596 | 2018-01-05 05:04:32 -0600 | [diff] [blame] | 2431 | channel->dpio = dpaa2_io_service_select(i); |
Ioana Ciornei | 47441f7 | 2018-12-10 16:50:19 +0000 | [diff] [blame] | 2432 | err = dpaa2_io_service_register(channel->dpio, nctx, dev); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2433 | if (err) { |
Ioana Radulescu | 5206d8d | 2017-06-06 10:00:33 -0500 | [diff] [blame] | 2434 | dev_dbg(dev, "No affine DPIO for cpu %d\n", i); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2435 | /* If no affine DPIO for this core, there's probably |
Ioana Radulescu | 5206d8d | 2017-06-06 10:00:33 -0500 | [diff] [blame] |  2436 |  * none available for the next cores either. Signal we want |
| 2437 | * to retry later, in case the DPIO devices weren't |
| 2438 | * probed yet. |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2439 | */ |
Ioana Radulescu | 5206d8d | 2017-06-06 10:00:33 -0500 | [diff] [blame] | 2440 | err = -EPROBE_DEFER; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2441 | goto err_service_reg; |
| 2442 | } |
| 2443 | |
| 2444 | /* Register DPCON notification with MC */ |
| 2445 | dpcon_notif_cfg.dpio_id = nctx->dpio_id; |
| 2446 | dpcon_notif_cfg.priority = 0; |
| 2447 | dpcon_notif_cfg.user_ctx = nctx->qman64; |
| 2448 | err = dpcon_set_notification(priv->mc_io, 0, |
| 2449 | channel->dpcon->mc_handle, |
| 2450 | &dpcon_notif_cfg); |
| 2451 | if (err) { |
|  2452 |  dev_err(dev, "dpcon_set_notification() failed\n"); |
| 2453 | goto err_set_cdan; |
| 2454 | } |
| 2455 | |
| 2456 | /* If we managed to allocate a channel and also found an affine |
| 2457 | * DPIO for this core, add it to the final mask |
| 2458 | */ |
| 2459 | cpumask_set_cpu(i, &priv->dpio_cpumask); |
| 2460 | priv->num_channels++; |
| 2461 | |
| 2462 | /* Stop if we already have enough channels to accommodate all |
| 2463 | * RX and TX conf queues |
| 2464 | */ |
Ioana Ciocoi Radulescu | b0e4f37 | 2018-11-14 11:48:35 +0000 | [diff] [blame] | 2465 | if (priv->num_channels == priv->dpni_attrs.num_queues) |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2466 | break; |
| 2467 | } |
| 2468 | |
| 2469 | return 0; |
| 2470 | |
| 2471 | err_set_cdan: |
Ioana Ciornei | 47441f7 | 2018-12-10 16:50:19 +0000 | [diff] [blame] | 2472 | dpaa2_io_service_deregister(channel->dpio, nctx, dev); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2473 | err_service_reg: |
| 2474 | free_channel(priv, channel); |
| 2475 | err_alloc_ch: |
Ioana Ciornei | 5aa4277 | 2019-11-12 18:21:52 +0200 | [diff] [blame] | 2476 | if (err == -EPROBE_DEFER) { |
| 2477 | for (i = 0; i < priv->num_channels; i++) { |
| 2478 | channel = priv->channel[i]; |
| 2479 | nctx = &channel->nctx; |
| 2480 | dpaa2_io_service_deregister(channel->dpio, nctx, dev); |
| 2481 | free_channel(priv, channel); |
| 2482 | } |
| 2483 | priv->num_channels = 0; |
Ioana Ciornei | d7f5a9d | 2018-11-09 15:26:45 +0000 | [diff] [blame] | 2484 | return err; |
Ioana Ciornei | 5aa4277 | 2019-11-12 18:21:52 +0200 | [diff] [blame] | 2485 | } |
Ioana Ciornei | d7f5a9d | 2018-11-09 15:26:45 +0000 | [diff] [blame] | 2486 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2487 | if (cpumask_empty(&priv->dpio_cpumask)) { |
| 2488 | dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); |
Ioana Ciornei | d7f5a9d | 2018-11-09 15:26:45 +0000 | [diff] [blame] | 2489 | return -ENODEV; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2490 | } |
| 2491 | |
| 2492 | dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", |
| 2493 | cpumask_pr_args(&priv->dpio_cpumask)); |
| 2494 | |
| 2495 | return 0; |
| 2496 | } |
| 2497 | |
| 2498 | static void free_dpio(struct dpaa2_eth_priv *priv) |
| 2499 | { |
Ioana Ciornei | 47441f7 | 2018-12-10 16:50:19 +0000 | [diff] [blame] | 2500 | struct device *dev = priv->net_dev->dev.parent; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2501 | struct dpaa2_eth_channel *ch; |
Ioana Ciornei | 47441f7 | 2018-12-10 16:50:19 +0000 | [diff] [blame] | 2502 | int i; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2503 | |
| 2504 | /* deregister CDAN notifications and free channels */ |
| 2505 | for (i = 0; i < priv->num_channels; i++) { |
| 2506 | ch = priv->channel[i]; |
Ioana Ciornei | 47441f7 | 2018-12-10 16:50:19 +0000 | [diff] [blame] | 2507 | dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2508 | free_channel(priv, ch); |
| 2509 | } |
| 2510 | } |
| 2511 | |
| 2512 | static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, |
| 2513 | int cpu) |
| 2514 | { |
| 2515 | struct device *dev = priv->net_dev->dev.parent; |
| 2516 | int i; |
| 2517 | |
| 2518 | for (i = 0; i < priv->num_channels; i++) |
| 2519 | if (priv->channel[i]->nctx.desired_cpu == cpu) |
| 2520 | return priv->channel[i]; |
| 2521 | |
| 2522 | /* We should never get here. Issue a warning and return |
| 2523 | * the first channel, because it's still better than nothing |
| 2524 | */ |
| 2525 | dev_warn(dev, "No affine channel found for cpu %d\n", cpu); |
| 2526 | |
| 2527 | return priv->channel[0]; |
| 2528 | } |
| 2529 | |
| 2530 | static void set_fq_affinity(struct dpaa2_eth_priv *priv) |
| 2531 | { |
| 2532 | struct device *dev = priv->net_dev->dev.parent; |
| 2533 | struct dpaa2_eth_fq *fq; |
| 2534 | int rx_cpu, txc_cpu; |
Ioana Radulescu | 06d5b17 | 2019-06-11 14:50:01 +0300 | [diff] [blame] | 2535 | int i; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2536 | |
| 2537 | /* For each FQ, pick one channel/CPU to deliver frames to. |
| 2538 | * This may well change at runtime, either through irqbalance or |
| 2539 | * through direct user intervention. |
| 2540 | */ |
| 2541 | rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); |
| 2542 | |
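|       |  /* Assign Rx and Tx conf FQs round-robin over the CPUs in dpio_cpumask */ |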
| 2543 | for (i = 0; i < priv->num_fqs; i++) { |
| 2544 | fq = &priv->fq[i]; |
| 2545 | switch (fq->type) { |
| 2546 | case DPAA2_RX_FQ: |
| 2547 | fq->target_cpu = rx_cpu; |
| 2548 | rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); |
| 2549 | if (rx_cpu >= nr_cpu_ids) |
| 2550 | rx_cpu = cpumask_first(&priv->dpio_cpumask); |
| 2551 | break; |
| 2552 | case DPAA2_TX_CONF_FQ: |
| 2553 | fq->target_cpu = txc_cpu; |
| 2554 | txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); |
| 2555 | if (txc_cpu >= nr_cpu_ids) |
| 2556 | txc_cpu = cpumask_first(&priv->dpio_cpumask); |
| 2557 | break; |
| 2558 | default: |
| 2559 | dev_err(dev, "Unknown FQ type: %d\n", fq->type); |
| 2560 | } |
| 2561 | fq->channel = get_affine_channel(priv, fq->target_cpu); |
| 2562 | } |
Ioana Radulescu | 06d5b17 | 2019-06-11 14:50:01 +0300 | [diff] [blame] | 2563 | |
| 2564 | update_xps(priv); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2565 | } |
| 2566 | |
| 2567 | static void setup_fqs(struct dpaa2_eth_priv *priv) |
| 2568 | { |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 2569 | int i, j; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2570 | |
| 2571 | /* We have one TxConf FQ per Tx flow. |
| 2572 | * The number of Tx and Rx queues is the same. |
| 2573 | * Tx queues come first in the fq array. |
| 2574 | */ |
| 2575 | for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { |
| 2576 | priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; |
| 2577 | priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; |
| 2578 | priv->fq[priv->num_fqs++].flowid = (u16)i; |
| 2579 | } |
| 2580 | |
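|       |  /* Rx FQs: one per hardware queue for each traffic class */ |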
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 2581 | for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { |
| 2582 | for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { |
| 2583 | priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; |
| 2584 | priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; |
| 2585 | priv->fq[priv->num_fqs].tc = (u8)j; |
| 2586 | priv->fq[priv->num_fqs++].flowid = (u16)i; |
| 2587 | } |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2588 | } |
| 2589 | |
| 2590 | /* For each FQ, decide on which core to process incoming frames */ |
| 2591 | set_fq_affinity(priv); |
| 2592 | } |
| 2593 | |
| 2594 | /* Allocate and configure one buffer pool for each interface */ |
| 2595 | static int setup_dpbp(struct dpaa2_eth_priv *priv) |
| 2596 | { |
| 2597 | int err; |
| 2598 | struct fsl_mc_device *dpbp_dev; |
| 2599 | struct device *dev = priv->net_dev->dev.parent; |
Ioana Radulescu | 05fa39c | 2017-06-06 10:00:37 -0500 | [diff] [blame] | 2600 | struct dpbp_attr dpbp_attrs; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2601 | |
| 2602 | err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, |
| 2603 | &dpbp_dev); |
| 2604 | if (err) { |
Ioana Ciornei | d7f5a9d | 2018-11-09 15:26:45 +0000 | [diff] [blame] | 2605 | if (err == -ENXIO) |
| 2606 | err = -EPROBE_DEFER; |
| 2607 | else |
| 2608 | dev_err(dev, "DPBP device allocation failed\n"); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2609 | return err; |
| 2610 | } |
| 2611 | |
| 2612 | priv->dpbp_dev = dpbp_dev; |
| 2613 | |
| 2614 | err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, |
| 2615 | &dpbp_dev->mc_handle); |
| 2616 | if (err) { |
| 2617 | dev_err(dev, "dpbp_open() failed\n"); |
| 2618 | goto err_open; |
| 2619 | } |
| 2620 | |
Ioana Radulescu | d00defe | 2017-06-06 10:00:32 -0500 | [diff] [blame] | 2621 | err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); |
| 2622 | if (err) { |
| 2623 | dev_err(dev, "dpbp_reset() failed\n"); |
| 2624 | goto err_reset; |
| 2625 | } |
| 2626 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2627 | err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); |
| 2628 | if (err) { |
| 2629 | dev_err(dev, "dpbp_enable() failed\n"); |
| 2630 | goto err_enable; |
| 2631 | } |
| 2632 | |
| 2633 | err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, |
Ioana Radulescu | 05fa39c | 2017-06-06 10:00:37 -0500 | [diff] [blame] | 2634 | &dpbp_attrs); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2635 | if (err) { |
| 2636 | dev_err(dev, "dpbp_get_attributes() failed\n"); |
| 2637 | goto err_get_attr; |
| 2638 | } |
Ioana Radulescu | 05fa39c | 2017-06-06 10:00:37 -0500 | [diff] [blame] | 2639 | priv->bpid = dpbp_attrs.bpid; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2640 | |
| 2641 | return 0; |
| 2642 | |
| 2643 | err_get_attr: |
| 2644 | dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); |
| 2645 | err_enable: |
Ioana Radulescu | d00defe | 2017-06-06 10:00:32 -0500 | [diff] [blame] | 2646 | err_reset: |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2647 | dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); |
| 2648 | err_open: |
| 2649 | fsl_mc_object_free(dpbp_dev); |
| 2650 | |
| 2651 | return err; |
| 2652 | } |
| 2653 | |
| 2654 | static void free_dpbp(struct dpaa2_eth_priv *priv) |
| 2655 | { |
| 2656 | drain_pool(priv); |
| 2657 | dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); |
| 2658 | dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); |
| 2659 | fsl_mc_object_free(priv->dpbp_dev); |
| 2660 | } |
| 2661 | |
Ioana Radulescu | 308f64e | 2017-10-29 08:20:40 +0000 | [diff] [blame] | 2662 | static int set_buffer_layout(struct dpaa2_eth_priv *priv) |
| 2663 | { |
| 2664 | struct device *dev = priv->net_dev->dev.parent; |
| 2665 | struct dpni_buffer_layout buf_layout = {0}; |
Ioana Ciocoi Radulescu | 27c8748 | 2019-02-04 17:00:35 +0000 | [diff] [blame] | 2666 | u16 rx_buf_align; |
Ioana Radulescu | 308f64e | 2017-10-29 08:20:40 +0000 | [diff] [blame] | 2667 | int err; |
| 2668 | |
Bogdan Purcareata | 8a4fd87 | 2017-10-29 08:20:42 +0000 | [diff] [blame] |  2669 |  /* We need to check for WRIOP version 1.0.0, but depending on the MC |
|  2670 |  * version, this number is not always provided correctly on rev1, |
|  2671 |  * so check for both alternatives in this situation. |
| 2672 | */ |
| 2673 | if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || |
| 2674 | priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) |
Ioana Ciocoi Radulescu | 27c8748 | 2019-02-04 17:00:35 +0000 | [diff] [blame] | 2675 | rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; |
Bogdan Purcareata | 8a4fd87 | 2017-10-29 08:20:42 +0000 | [diff] [blame] | 2676 | else |
Ioana Ciocoi Radulescu | 27c8748 | 2019-02-04 17:00:35 +0000 | [diff] [blame] | 2677 | rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; |
Bogdan Purcareata | 8a4fd87 | 2017-10-29 08:20:42 +0000 | [diff] [blame] | 2678 | |
Ioana Ciornei | efa6a7d | 2020-05-15 15:30:22 +0300 | [diff] [blame] | 2679 | /* We need to ensure that the buffer size seen by WRIOP is a multiple |
| 2680 | * of 64 or 256 bytes depending on the WRIOP version. |
| 2681 | */ |
| 2682 | priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); |
| 2683 | |
Bogdan Purcareata | 4b2d9fe | 2017-10-29 08:20:43 +0000 | [diff] [blame] | 2684 | /* tx buffer */ |
Ioana Radulescu | 308f64e | 2017-10-29 08:20:40 +0000 | [diff] [blame] | 2685 | buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; |
Ioana Radulescu | 859f998 | 2018-04-26 18:23:47 +0800 | [diff] [blame] | 2686 | buf_layout.pass_timestamp = true; |
| 2687 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | |
| 2688 | DPNI_BUF_LAYOUT_OPT_TIMESTAMP; |
Ioana Radulescu | 308f64e | 2017-10-29 08:20:40 +0000 | [diff] [blame] | 2689 | err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
| 2690 | DPNI_QUEUE_TX, &buf_layout); |
| 2691 | if (err) { |
| 2692 | dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); |
| 2693 | return err; |
| 2694 | } |
| 2695 | |
| 2696 | /* tx-confirm buffer */ |
Ioana Radulescu | 859f998 | 2018-04-26 18:23:47 +0800 | [diff] [blame] | 2697 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; |
Ioana Radulescu | 308f64e | 2017-10-29 08:20:40 +0000 | [diff] [blame] | 2698 | err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
| 2699 | DPNI_QUEUE_TX_CONFIRM, &buf_layout); |
| 2700 | if (err) { |
| 2701 | dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); |
| 2702 | return err; |
| 2703 | } |
| 2704 | |
Bogdan Purcareata | 4b2d9fe | 2017-10-29 08:20:43 +0000 | [diff] [blame] | 2705 | /* Now that we've set our tx buffer layout, retrieve the minimum |
| 2706 | * required tx data offset. |
| 2707 | */ |
| 2708 | err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, |
| 2709 | &priv->tx_data_offset); |
| 2710 | if (err) { |
| 2711 | dev_err(dev, "dpni_get_tx_data_offset() failed\n"); |
| 2712 | return err; |
| 2713 | } |
| 2714 | |
| 2715 | if ((priv->tx_data_offset % 64) != 0) |
| 2716 | dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", |
| 2717 | priv->tx_data_offset); |
| 2718 | |
| 2719 | /* rx buffer */ |
Ioana Radulescu | 2b7c86e | 2017-12-08 06:47:56 -0600 | [diff] [blame] | 2720 | buf_layout.pass_frame_status = true; |
Bogdan Purcareata | 4b2d9fe | 2017-10-29 08:20:43 +0000 | [diff] [blame] | 2721 | buf_layout.pass_parser_result = true; |
Ioana Ciocoi Radulescu | 27c8748 | 2019-02-04 17:00:35 +0000 | [diff] [blame] | 2722 | buf_layout.data_align = rx_buf_align; |
Bogdan Purcareata | 4b2d9fe | 2017-10-29 08:20:43 +0000 | [diff] [blame] | 2723 | buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); |
| 2724 | buf_layout.private_data_size = 0; |
| 2725 | buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | |
| 2726 | DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | |
| 2727 | DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | |
Ioana Radulescu | 859f998 | 2018-04-26 18:23:47 +0800 | [diff] [blame] | 2728 | DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | |
| 2729 | DPNI_BUF_LAYOUT_OPT_TIMESTAMP; |
Bogdan Purcareata | 4b2d9fe | 2017-10-29 08:20:43 +0000 | [diff] [blame] | 2730 | err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, |
| 2731 | DPNI_QUEUE_RX, &buf_layout); |
| 2732 | if (err) { |
| 2733 | dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); |
| 2734 | return err; |
| 2735 | } |
| 2736 | |
Ioana Radulescu | 308f64e | 2017-10-29 08:20:40 +0000 | [diff] [blame] | 2737 | return 0; |
| 2738 | } |
| 2739 | |
Ioana Ciocoi Radulescu | 1fa0f68 | 2019-02-04 17:00:36 +0000 | [diff] [blame] | 2740 | #define DPNI_ENQUEUE_FQID_VER_MAJOR 7 |
| 2741 | #define DPNI_ENQUEUE_FQID_VER_MINOR 9 |
| 2742 | |
| 2743 | static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, |
| 2744 | struct dpaa2_eth_fq *fq, |
Ioana Ciornei | 48c0481 | 2020-04-22 15:05:10 +0300 | [diff] [blame] | 2745 | struct dpaa2_fd *fd, u8 prio, |
Ioana Ciornei | 6ff8044 | 2020-04-22 15:05:11 +0300 | [diff] [blame] | 2746 | u32 num_frames __always_unused, |
Ioana Ciornei | 48c0481 | 2020-04-22 15:05:10 +0300 | [diff] [blame] | 2747 | int *frames_enqueued) |
Ioana Ciocoi Radulescu | 1fa0f68 | 2019-02-04 17:00:36 +0000 | [diff] [blame] | 2748 | { |
Ioana Ciornei | 48c0481 | 2020-04-22 15:05:10 +0300 | [diff] [blame] | 2749 | int err; |
| 2750 | |
| 2751 | err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, |
| 2752 | priv->tx_qdid, prio, |
| 2753 | fq->tx_qdbin, fd); |
| 2754 | if (!err && frames_enqueued) |
| 2755 | *frames_enqueued = 1; |
| 2756 | return err; |
Ioana Ciocoi Radulescu | 1fa0f68 | 2019-02-04 17:00:36 +0000 | [diff] [blame] | 2757 | } |
| 2758 | |
Ioana Ciornei | 6ff8044 | 2020-04-22 15:05:11 +0300 | [diff] [blame] | 2759 | static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv, |
| 2760 | struct dpaa2_eth_fq *fq, |
| 2761 | struct dpaa2_fd *fd, |
| 2762 | u8 prio, u32 num_frames, |
| 2763 | int *frames_enqueued) |
Ioana Ciocoi Radulescu | 1fa0f68 | 2019-02-04 17:00:36 +0000 | [diff] [blame] | 2764 | { |
Ioana Ciornei | 48c0481 | 2020-04-22 15:05:10 +0300 | [diff] [blame] | 2765 | int err; |
| 2766 | |
Ioana Ciornei | 6ff8044 | 2020-04-22 15:05:11 +0300 | [diff] [blame] | 2767 | err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio, |
| 2768 | fq->tx_fqid[prio], |
| 2769 | fd, num_frames); |
| 2770 | |
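|       |  /* The return value is the number of frames actually enqueued; zero |
|       |   * means the portal could not accept any frame, so report busy |
|       |   */ |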
| 2771 | if (err == 0) |
| 2772 | return -EBUSY; |
| 2773 | |
| 2774 | if (frames_enqueued) |
| 2775 | *frames_enqueued = err; |
| 2776 | return 0; |
Ioana Ciocoi Radulescu | 1fa0f68 | 2019-02-04 17:00:36 +0000 | [diff] [blame] | 2777 | } |
| 2778 | |
| 2779 | static void set_enqueue_mode(struct dpaa2_eth_priv *priv) |
| 2780 | { |
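|       |  /* DPNI versions older than 7.9 cannot enqueue by FQID, so fall back |
|       |   * to QDID-based enqueue on them |
|       |   */ |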
| 2781 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, |
| 2782 | DPNI_ENQUEUE_FQID_VER_MINOR) < 0) |
| 2783 | priv->enqueue = dpaa2_eth_enqueue_qd; |
| 2784 | else |
Ioana Ciornei | 6ff8044 | 2020-04-22 15:05:11 +0300 | [diff] [blame] | 2785 | priv->enqueue = dpaa2_eth_enqueue_fq_multiple; |
Ioana Ciocoi Radulescu | 1fa0f68 | 2019-02-04 17:00:36 +0000 | [diff] [blame] | 2786 | } |
| 2787 | |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 2788 | static int set_pause(struct dpaa2_eth_priv *priv) |
| 2789 | { |
| 2790 | struct device *dev = priv->net_dev->dev.parent; |
| 2791 | struct dpni_link_cfg link_cfg = {0}; |
| 2792 | int err; |
| 2793 | |
| 2794 | /* Get the default link options so we don't override other flags */ |
| 2795 | err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); |
| 2796 | if (err) { |
| 2797 | dev_err(dev, "dpni_get_link_cfg() failed\n"); |
| 2798 | return err; |
| 2799 | } |
| 2800 | |
| 2801 | /* By default, enable both Rx and Tx pause frames */ |
| 2802 | link_cfg.options |= DPNI_LINK_OPT_PAUSE; |
| 2803 | link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; |
| 2804 | err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); |
| 2805 | if (err) { |
| 2806 | dev_err(dev, "dpni_set_link_cfg() failed\n"); |
| 2807 | return err; |
| 2808 | } |
| 2809 | |
| 2810 | priv->link_state.options = link_cfg.options; |
| 2811 | |
| 2812 | return 0; |
| 2813 | } |
| 2814 | |
Ioana Radulescu | a690af4f | 2019-10-16 10:36:23 +0300 | [diff] [blame] | 2815 | static void update_tx_fqids(struct dpaa2_eth_priv *priv) |
| 2816 | { |
| 2817 | struct dpni_queue_id qid = {0}; |
| 2818 | struct dpaa2_eth_fq *fq; |
| 2819 | struct dpni_queue queue; |
| 2820 | int i, j, err; |
| 2821 | |
| 2822 | /* We only use Tx FQIDs for FQID-based enqueue, so check |
|  2823 |  * if the DPNI version supports it before updating FQIDs |
| 2824 | */ |
| 2825 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, |
| 2826 | DPNI_ENQUEUE_FQID_VER_MINOR) < 0) |
| 2827 | return; |
| 2828 | |
| 2829 | for (i = 0; i < priv->num_fqs; i++) { |
| 2830 | fq = &priv->fq[i]; |
| 2831 | if (fq->type != DPAA2_TX_CONF_FQ) |
| 2832 | continue; |
| 2833 | for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { |
| 2834 | err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, |
| 2835 | DPNI_QUEUE_TX, j, fq->flowid, |
| 2836 | &queue, &qid); |
| 2837 | if (err) |
| 2838 | goto out_err; |
| 2839 | |
| 2840 | fq->tx_fqid[j] = qid.fqid; |
| 2841 | if (fq->tx_fqid[j] == 0) |
| 2842 | goto out_err; |
| 2843 | } |
| 2844 | } |
| 2845 | |
Ioana Ciornei | 6ff8044 | 2020-04-22 15:05:11 +0300 | [diff] [blame] | 2846 | priv->enqueue = dpaa2_eth_enqueue_fq_multiple; |
Ioana Radulescu | a690af4f | 2019-10-16 10:36:23 +0300 | [diff] [blame] | 2847 | |
| 2848 | return; |
| 2849 | |
| 2850 | out_err: |
| 2851 | netdev_info(priv->net_dev, |
| 2852 | "Error reading Tx FQID, fallback to QDID-based enqueue\n"); |
| 2853 | priv->enqueue = dpaa2_eth_enqueue_qd; |
| 2854 | } |
| 2855 | |
Ioana Radulescu | 6aa90fe | 2020-05-31 00:08:09 +0300 | [diff] [blame] | 2856 | /* Configure ingress classification based on VLAN PCP */ |
| 2857 | static int set_vlan_qos(struct dpaa2_eth_priv *priv) |
| 2858 | { |
| 2859 | struct device *dev = priv->net_dev->dev.parent; |
| 2860 | struct dpkg_profile_cfg kg_cfg = {0}; |
| 2861 | struct dpni_qos_tbl_cfg qos_cfg = {0}; |
| 2862 | struct dpni_rule_cfg key_params; |
| 2863 | void *dma_mem, *key, *mask; |
| 2864 | u8 key_size = 2; /* VLAN TCI field */ |
| 2865 | int i, pcp, err; |
| 2866 | |
| 2867 | /* VLAN-based classification only makes sense if we have multiple |
| 2868 | * traffic classes. |
| 2869 | * Also, we need to extract just the 3-bit PCP field from the VLAN |
| 2870 | * header and we can only do that by using a mask |
| 2871 | */ |
| 2872 | if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) { |
| 2873 | dev_dbg(dev, "VLAN-based QoS classification not supported\n"); |
| 2874 | return -EOPNOTSUPP; |
| 2875 | } |
| 2876 | |
| 2877 | dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); |
| 2878 | if (!dma_mem) |
| 2879 | return -ENOMEM; |
| 2880 | |
| 2881 | kg_cfg.num_extracts = 1; |
| 2882 | kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; |
| 2883 | kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN; |
| 2884 | kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; |
| 2885 | kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI; |
| 2886 | |
| 2887 | err = dpni_prepare_key_cfg(&kg_cfg, dma_mem); |
| 2888 | if (err) { |
| 2889 | dev_err(dev, "dpni_prepare_key_cfg failed\n"); |
| 2890 | goto out_free_tbl; |
| 2891 | } |
| 2892 | |
| 2893 | /* set QoS table */ |
| 2894 | qos_cfg.default_tc = 0; |
| 2895 | qos_cfg.discard_on_miss = 0; |
| 2896 | qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, |
| 2897 | DPAA2_CLASSIFIER_DMA_SIZE, |
| 2898 | DMA_TO_DEVICE); |
| 2899 | if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) { |
| 2900 | dev_err(dev, "QoS table DMA mapping failed\n"); |
| 2901 | err = -ENOMEM; |
| 2902 | goto out_free_tbl; |
| 2903 | } |
| 2904 | |
| 2905 | err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg); |
| 2906 | if (err) { |
| 2907 | dev_err(dev, "dpni_set_qos_table failed\n"); |
| 2908 | goto out_unmap_tbl; |
| 2909 | } |
| 2910 | |
| 2911 | /* Add QoS table entries */ |
| 2912 | key = kzalloc(key_size * 2, GFP_KERNEL); |
| 2913 | if (!key) { |
| 2914 | err = -ENOMEM; |
| 2915 | goto out_unmap_tbl; |
| 2916 | } |
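|       |  /* Key and mask are laid out back to back so one DMA mapping covers both */ |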
| 2917 | mask = key + key_size; |
| 2918 | *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK); |
| 2919 | |
| 2920 | key_params.key_iova = dma_map_single(dev, key, key_size * 2, |
| 2921 | DMA_TO_DEVICE); |
| 2922 | if (dma_mapping_error(dev, key_params.key_iova)) { |
|  2923 |  dev_err(dev, "QoS table entry DMA mapping failed\n"); |
| 2924 | err = -ENOMEM; |
| 2925 | goto out_free_key; |
| 2926 | } |
| 2927 | |
| 2928 | key_params.mask_iova = key_params.key_iova + key_size; |
| 2929 | key_params.key_size = key_size; |
| 2930 | |
|  2931 |  /* We add rules for PCP-based distribution starting with the highest |
| 2932 | * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic |
| 2933 | * classes to accommodate all priority levels, the lowest ones end up |
|  2934 |  * on TC 0, which was configured as the default |
| 2935 | */ |
| 2936 | for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) { |
| 2937 | *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT); |
| 2938 | dma_sync_single_for_device(dev, key_params.key_iova, |
| 2939 | key_size * 2, DMA_TO_DEVICE); |
| 2940 | |
| 2941 | err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, |
| 2942 | &key_params, i, i); |
| 2943 | if (err) { |
| 2944 | dev_err(dev, "dpni_add_qos_entry failed\n"); |
| 2945 | dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token); |
| 2946 | goto out_unmap_key; |
| 2947 | } |
| 2948 | } |
| 2949 | |
| 2950 | priv->vlan_cls_enabled = true; |
| 2951 | |
| 2952 | /* Table and key memory is not persistent, clean everything up after |
| 2953 | * configuration is finished |
| 2954 | */ |
| 2955 | out_unmap_key: |
| 2956 | dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE); |
| 2957 | out_free_key: |
| 2958 | kfree(key); |
| 2959 | out_unmap_tbl: |
| 2960 | dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE, |
| 2961 | DMA_TO_DEVICE); |
| 2962 | out_free_tbl: |
| 2963 | kfree(dma_mem); |
| 2964 | |
| 2965 | return err; |
| 2966 | } |
| 2967 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2968 | /* Configure the DPNI object this interface is associated with */ |
| 2969 | static int setup_dpni(struct fsl_mc_device *ls_dev) |
| 2970 | { |
| 2971 | struct device *dev = &ls_dev->dev; |
| 2972 | struct dpaa2_eth_priv *priv; |
| 2973 | struct net_device *net_dev; |
| 2974 | int err; |
| 2975 | |
| 2976 | net_dev = dev_get_drvdata(dev); |
| 2977 | priv = netdev_priv(net_dev); |
| 2978 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2979 | /* get a handle for the DPNI object */ |
Ioana Radulescu | 50eacbc | 2017-06-06 10:00:36 -0500 | [diff] [blame] | 2980 | err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2981 | if (err) { |
| 2982 | dev_err(dev, "dpni_open() failed\n"); |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 2983 | return err; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 2984 | } |
| 2985 | |
Ioana Radulescu | 311cffa | 2018-03-23 08:44:09 -0500 | [diff] [blame] | 2986 | /* Check if we can work with this DPNI object */ |
| 2987 | err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, |
| 2988 | &priv->dpni_ver_minor); |
| 2989 | if (err) { |
| 2990 | dev_err(dev, "dpni_get_api_version() failed\n"); |
| 2991 | goto close; |
| 2992 | } |
| 2993 | if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { |
| 2994 | dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", |
| 2995 | priv->dpni_ver_major, priv->dpni_ver_minor, |
| 2996 | DPNI_VER_MAJOR, DPNI_VER_MINOR); |
| 2997 | err = -ENOTSUPP; |
| 2998 | goto close; |
| 2999 | } |
| 3000 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3001 | ls_dev->mc_io = priv->mc_io; |
| 3002 | ls_dev->mc_handle = priv->mc_token; |
| 3003 | |
| 3004 | err = dpni_reset(priv->mc_io, 0, priv->mc_token); |
| 3005 | if (err) { |
| 3006 | dev_err(dev, "dpni_reset() failed\n"); |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 3007 | goto close; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3008 | } |
| 3009 | |
| 3010 | err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, |
| 3011 | &priv->dpni_attrs); |
| 3012 | if (err) { |
| 3013 | dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 3014 | goto close; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3015 | } |
| 3016 | |
Ioana Radulescu | 308f64e | 2017-10-29 08:20:40 +0000 | [diff] [blame] | 3017 | err = set_buffer_layout(priv); |
| 3018 | if (err) |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 3019 | goto close; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3020 | |
Ioana Ciocoi Radulescu | 1fa0f68 | 2019-02-04 17:00:36 +0000 | [diff] [blame] | 3021 | set_enqueue_mode(priv); |
| 3022 | |
Ioana Radulescu | 8eb3cef | 2019-08-28 17:08:15 +0300 | [diff] [blame] | 3023 | /* Enable pause frame support */ |
| 3024 | if (dpaa2_eth_has_pause_support(priv)) { |
| 3025 | err = set_pause(priv); |
| 3026 | if (err) |
| 3027 | goto close; |
| 3028 | } |
| 3029 | |
Ioana Radulescu | 6aa90fe | 2020-05-31 00:08:09 +0300 | [diff] [blame] | 3030 | err = set_vlan_qos(priv); |
| 3031 | if (err && err != -EOPNOTSUPP) |
| 3032 | goto close; |
| 3033 | |
Xu Wang | 9334d5b | 2020-06-11 02:45:20 +0000 | [diff] [blame] | 3034 | priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv), |
| 3035 | sizeof(struct dpaa2_eth_cls_rule), |
| 3036 | GFP_KERNEL); |
Wei Yongjun | 97fff7c | 2020-04-27 10:43:22 +0000 | [diff] [blame] | 3037 | if (!priv->cls_rules) { |
| 3038 | err = -ENOMEM; |
Ioana Radulescu | afb90db | 2018-10-01 13:44:58 +0300 | [diff] [blame] | 3039 | goto close; |
Wei Yongjun | 97fff7c | 2020-04-27 10:43:22 +0000 | [diff] [blame] | 3040 | } |
Ioana Radulescu | afb90db | 2018-10-01 13:44:58 +0300 | [diff] [blame] | 3041 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3042 | return 0; |
| 3043 | |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 3044 | close: |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3045 | dpni_close(priv->mc_io, 0, priv->mc_token); |
Ioana Radulescu | f6dda80 | 2017-10-29 08:20:39 +0000 | [diff] [blame] | 3046 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3047 | return err; |
| 3048 | } |
| 3049 | |
| 3050 | static void free_dpni(struct dpaa2_eth_priv *priv) |
| 3051 | { |
| 3052 | int err; |
| 3053 | |
| 3054 | err = dpni_reset(priv->mc_io, 0, priv->mc_token); |
| 3055 | if (err) |
| 3056 | netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", |
| 3057 | err); |
| 3058 | |
| 3059 | dpni_close(priv->mc_io, 0, priv->mc_token); |
| 3060 | } |
| 3061 | |
| 3062 | static int setup_rx_flow(struct dpaa2_eth_priv *priv, |
| 3063 | struct dpaa2_eth_fq *fq) |
| 3064 | { |
| 3065 | struct device *dev = priv->net_dev->dev.parent; |
| 3066 | struct dpni_queue queue; |
| 3067 | struct dpni_queue_id qid; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3068 | int err; |
| 3069 | |
| 3070 | err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 3071 | DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3072 | if (err) { |
| 3073 | dev_err(dev, "dpni_get_queue(RX) failed\n"); |
| 3074 | return err; |
| 3075 | } |
| 3076 | |
| 3077 | fq->fqid = qid.fqid; |
| 3078 | |
| 3079 | queue.destination.id = fq->channel->dpcon_id; |
| 3080 | queue.destination.type = DPNI_DEST_DPCON; |
| 3081 | queue.destination.priority = 1; |
Ioana Radulescu | 75c583a | 2018-02-26 10:28:06 -0600 | [diff] [blame] | 3082 | queue.user_context = (u64)(uintptr_t)fq; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3083 | err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 3084 | DPNI_QUEUE_RX, fq->tc, fq->flowid, |
Ioana Radulescu | 16fa1cf | 2019-05-23 17:38:22 +0300 | [diff] [blame] | 3085 | DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3086 | &queue); |
| 3087 | if (err) { |
| 3088 | dev_err(dev, "dpni_set_queue(RX) failed\n"); |
| 3089 | return err; |
| 3090 | } |
| 3091 | |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 3092 | /* xdp_rxq setup */ |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] |  3093 |  /* Register the xdp_rxq info only once per channel (i.e. for TC 0) */ |
| 3094 | if (fq->tc > 0) |
| 3095 | return 0; |
| 3096 | |
Ioana Radulescu | d678be1 | 2019-03-01 17:47:24 +0000 | [diff] [blame] | 3097 | err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, |
| 3098 | fq->flowid); |
| 3099 | if (err) { |
| 3100 | dev_err(dev, "xdp_rxq_info_reg failed\n"); |
| 3101 | return err; |
| 3102 | } |
| 3103 | |
| 3104 | err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq, |
| 3105 | MEM_TYPE_PAGE_ORDER0, NULL); |
| 3106 | if (err) { |
| 3107 | dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); |
| 3108 | return err; |
| 3109 | } |
| 3110 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3111 | return 0; |
| 3112 | } |
| 3113 | |
| 3114 | static int setup_tx_flow(struct dpaa2_eth_priv *priv, |
| 3115 | struct dpaa2_eth_fq *fq) |
| 3116 | { |
| 3117 | struct device *dev = priv->net_dev->dev.parent; |
| 3118 | struct dpni_queue queue; |
| 3119 | struct dpni_queue_id qid; |
Ioana Radulescu | 15c87f6 | 2019-06-11 14:50:02 +0300 | [diff] [blame] | 3120 | int i, err; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3121 | |
Ioana Radulescu | 15c87f6 | 2019-06-11 14:50:02 +0300 | [diff] [blame] | 3122 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
| 3123 | err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, |
| 3124 | DPNI_QUEUE_TX, i, fq->flowid, |
| 3125 | &queue, &qid); |
| 3126 | if (err) { |
| 3127 | dev_err(dev, "dpni_get_queue(TX) failed\n"); |
| 3128 | return err; |
| 3129 | } |
| 3130 | fq->tx_fqid[i] = qid.fqid; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3131 | } |
| 3132 | |
Ioana Radulescu | 15c87f6 | 2019-06-11 14:50:02 +0300 | [diff] [blame] | 3133 | /* All Tx queues belonging to the same flowid have the same qdbin */ |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3134 | fq->tx_qdbin = qid.qdbin; |
| 3135 | |
| 3136 | err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, |
| 3137 | DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, |
| 3138 | &queue, &qid); |
| 3139 | if (err) { |
| 3140 | dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); |
| 3141 | return err; |
| 3142 | } |
| 3143 | |
| 3144 | fq->fqid = qid.fqid; |
| 3145 | |
| 3146 | queue.destination.id = fq->channel->dpcon_id; |
| 3147 | queue.destination.type = DPNI_DEST_DPCON; |
| 3148 | queue.destination.priority = 0; |
Ioana Radulescu | 75c583a | 2018-02-26 10:28:06 -0600 | [diff] [blame] | 3149 | queue.user_context = (u64)(uintptr_t)fq; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3150 | err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, |
| 3151 | DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, |
| 3152 | DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, |
| 3153 | &queue); |
| 3154 | if (err) { |
| 3155 | dev_err(dev, "dpni_set_queue(TX_CONF) failed\n"); |
| 3156 | return err; |
| 3157 | } |
| 3158 | |
| 3159 | return 0; |
| 3160 | } |
| 3161 | |
Ioana Ciocoi Radulescu | edad8d2 | 2018-09-24 15:36:21 +0000 | [diff] [blame] | 3162 | /* Supported header fields for Rx hash distribution key */ |
Ioana Radulescu | f76c483 | 2018-10-01 13:44:56 +0300 | [diff] [blame] | 3163 | static const struct dpaa2_eth_dist_fields dist_fields[] = { |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3164 | { |
Ioana Ciocoi Radulescu | edad8d2 | 2018-09-24 15:36:21 +0000 | [diff] [blame] | 3165 | /* L2 header */ |
| 3166 | .rxnfc_field = RXH_L2DA, |
| 3167 | .cls_prot = NET_PROT_ETH, |
| 3168 | .cls_field = NH_FLD_ETH_DA, |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3169 | .id = DPAA2_ETH_DIST_ETHDST, |
Ioana Ciocoi Radulescu | edad8d2 | 2018-09-24 15:36:21 +0000 | [diff] [blame] | 3170 | .size = 6, |
| 3171 | }, { |
Ioana Radulescu | afb90db | 2018-10-01 13:44:58 +0300 | [diff] [blame] | 3172 | .cls_prot = NET_PROT_ETH, |
| 3173 | .cls_field = NH_FLD_ETH_SA, |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3174 | .id = DPAA2_ETH_DIST_ETHSRC, |
Ioana Radulescu | afb90db | 2018-10-01 13:44:58 +0300 | [diff] [blame] | 3175 | .size = 6, |
| 3176 | }, { |
| 3177 | /* This is the last ethertype field parsed: |
|  3178 |  * depending on the frame format, it can be the MAC ethertype |
|  3179 |  * or the VLAN ethertype. |
| 3180 | */ |
| 3181 | .cls_prot = NET_PROT_ETH, |
| 3182 | .cls_field = NH_FLD_ETH_TYPE, |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3183 | .id = DPAA2_ETH_DIST_ETHTYPE, |
Ioana Radulescu | afb90db | 2018-10-01 13:44:58 +0300 | [diff] [blame] | 3184 | .size = 2, |
| 3185 | }, { |
Ioana Ciocoi Radulescu | edad8d2 | 2018-09-24 15:36:21 +0000 | [diff] [blame] | 3186 | /* VLAN header */ |
| 3187 | .rxnfc_field = RXH_VLAN, |
| 3188 | .cls_prot = NET_PROT_VLAN, |
| 3189 | .cls_field = NH_FLD_VLAN_TCI, |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3190 | .id = DPAA2_ETH_DIST_VLAN, |
Ioana Ciocoi Radulescu | edad8d2 | 2018-09-24 15:36:21 +0000 | [diff] [blame] | 3191 | .size = 2, |
| 3192 | }, { |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3193 | /* IP header */ |
| 3194 | .rxnfc_field = RXH_IP_SRC, |
| 3195 | .cls_prot = NET_PROT_IP, |
| 3196 | .cls_field = NH_FLD_IP_SRC, |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3197 | .id = DPAA2_ETH_DIST_IPSRC, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3198 | .size = 4, |
| 3199 | }, { |
| 3200 | .rxnfc_field = RXH_IP_DST, |
| 3201 | .cls_prot = NET_PROT_IP, |
| 3202 | .cls_field = NH_FLD_IP_DST, |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3203 | .id = DPAA2_ETH_DIST_IPDST, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3204 | .size = 4, |
| 3205 | }, { |
| 3206 | .rxnfc_field = RXH_L3_PROTO, |
| 3207 | .cls_prot = NET_PROT_IP, |
| 3208 | .cls_field = NH_FLD_IP_PROTO, |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3209 | .id = DPAA2_ETH_DIST_IPPROTO, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3210 | .size = 1, |
| 3211 | }, { |
| 3212 | /* Using UDP ports, this is functionally equivalent to raw |
|  3213 |  * byte pairs from the L4 header. |
| 3214 | */ |
| 3215 | .rxnfc_field = RXH_L4_B_0_1, |
| 3216 | .cls_prot = NET_PROT_UDP, |
| 3217 | .cls_field = NH_FLD_UDP_PORT_SRC, |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3218 | .id = DPAA2_ETH_DIST_L4SRC, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3219 | .size = 2, |
| 3220 | }, { |
| 3221 | .rxnfc_field = RXH_L4_B_2_3, |
| 3222 | .cls_prot = NET_PROT_UDP, |
| 3223 | .cls_field = NH_FLD_UDP_PORT_DST, |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3224 | .id = DPAA2_ETH_DIST_L4DST, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3225 | .size = 2, |
| 3226 | }, |
| 3227 | }; |
| 3228 | |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3229 | /* Configure the Rx hash key using the legacy API */ |
| 3230 | static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) |
| 3231 | { |
| 3232 | struct device *dev = priv->net_dev->dev.parent; |
| 3233 | struct dpni_rx_tc_dist_cfg dist_cfg; |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 3234 | int i, err = 0; |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3235 | |
| 3236 | memset(&dist_cfg, 0, sizeof(dist_cfg)); |
| 3237 | |
| 3238 | dist_cfg.key_cfg_iova = key; |
| 3239 | dist_cfg.dist_size = dpaa2_eth_queue_count(priv); |
| 3240 | dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; |
| 3241 | |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 3242 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
| 3243 | err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, |
| 3244 | i, &dist_cfg); |
| 3245 | if (err) { |
| 3246 | dev_err(dev, "dpni_set_rx_tc_dist failed\n"); |
| 3247 | break; |
| 3248 | } |
| 3249 | } |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3250 | |
| 3251 | return err; |
| 3252 | } |
| 3253 | |
| 3254 | /* Configure the Rx hash key using the new API */ |
| 3255 | static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) |
| 3256 | { |
| 3257 | struct device *dev = priv->net_dev->dev.parent; |
| 3258 | struct dpni_rx_dist_cfg dist_cfg; |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 3259 | int i, err = 0; |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3260 | |
| 3261 | memset(&dist_cfg, 0, sizeof(dist_cfg)); |
| 3262 | |
| 3263 | dist_cfg.key_cfg_iova = key; |
| 3264 | dist_cfg.dist_size = dpaa2_eth_queue_count(priv); |
| 3265 | dist_cfg.enable = 1; |
| 3266 | |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 3267 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
| 3268 | dist_cfg.tc = i; |
| 3269 | err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, |
| 3270 | &dist_cfg); |
| 3271 | if (err) { |
| 3272 | dev_err(dev, "dpni_set_rx_hash_dist failed\n"); |
| 3273 | break; |
| 3274 | } |
| 3275 | } |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3276 | |
| 3277 | return err; |
| 3278 | } |
| 3279 | |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3280 | /* Configure the Rx flow classification key */ |
| 3281 | static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) |
| 3282 | { |
| 3283 | struct device *dev = priv->net_dev->dev.parent; |
| 3284 | struct dpni_rx_dist_cfg dist_cfg; |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 3285 | int i, err = 0; |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3286 | |
| 3287 | memset(&dist_cfg, 0, sizeof(dist_cfg)); |
| 3288 | |
| 3289 | dist_cfg.key_cfg_iova = key; |
| 3290 | dist_cfg.dist_size = dpaa2_eth_queue_count(priv); |
| 3291 | dist_cfg.enable = 1; |
| 3292 | |
Ioana Radulescu | 685e39e | 2020-05-31 00:08:08 +0300 | [diff] [blame] | 3293 | for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { |
| 3294 | dist_cfg.tc = i; |
| 3295 | err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, |
| 3296 | &dist_cfg); |
| 3297 | if (err) { |
| 3298 | dev_err(dev, "dpni_set_rx_fs_dist failed\n"); |
| 3299 | break; |
| 3300 | } |
| 3301 | } |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3302 | |
| 3303 | return err; |
| 3304 | } |
| 3305 | |
Ioana Radulescu | afb90db | 2018-10-01 13:44:58 +0300 | [diff] [blame] | 3306 | /* Size of the Rx flow classification key */ |
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3307 | int dpaa2_eth_cls_key_size(u64 fields) |
Ioana Radulescu | afb90db | 2018-10-01 13:44:58 +0300 | [diff] [blame] | 3308 | { |
| 3309 | int i, size = 0; |
| 3310 | |
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3311 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { |
| 3312 | if (!(fields & dist_fields[i].id)) |
| 3313 | continue; |
Ioana Radulescu | afb90db | 2018-10-01 13:44:58 +0300 | [diff] [blame] | 3314 | size += dist_fields[i].size; |
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3315 | } |
Ioana Radulescu | afb90db | 2018-10-01 13:44:58 +0300 | [diff] [blame] | 3316 | |
| 3317 | return size; |
| 3318 | } |
| 3319 | |
| 3320 | /* Offset of header field in Rx classification key */ |
| 3321 | int dpaa2_eth_cls_fld_off(int prot, int field) |
| 3322 | { |
| 3323 | int i, off = 0; |
| 3324 | |
| 3325 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { |
| 3326 | if (dist_fields[i].cls_prot == prot && |
| 3327 | dist_fields[i].cls_field == field) |
| 3328 | return off; |
| 3329 | off += dist_fields[i].size; |
| 3330 | } |
| 3331 | |
| 3332 | WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n"); |
| 3333 | return 0; |
| 3334 | } |
| 3335 | |
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3336 | /* Prune unused fields from the classification rule. |
| 3337 | * Used when masking is not supported |
| 3338 | */ |
| 3339 | void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields) |
| 3340 | { |
| 3341 | int off = 0, new_off = 0; |
| 3342 | int i, size; |
| 3343 | |
| 3344 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { |
| 3345 | size = dist_fields[i].size; |
| 3346 | if (dist_fields[i].id & fields) { |
| 3347 | memcpy(key_mem + new_off, key_mem + off, size); |
| 3348 | new_off += size; |
| 3349 | } |
| 3350 | off += size; |
| 3351 | } |
| 3352 | } |
| 3353 | |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3354 | /* Set Rx distribution (hash or flow classification) key |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3355 |  * flags is a combination of DPAA2_ETH_DIST_ field ids
| 3356 | */ |
Ioana Ciornei | 3233c15 | 2018-10-12 16:27:29 +0000 | [diff] [blame] | 3357 | static int dpaa2_eth_set_dist_key(struct net_device *net_dev, |
| 3358 | enum dpaa2_eth_rx_dist type, u64 flags) |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3359 | { |
| 3360 | struct device *dev = net_dev->dev.parent; |
| 3361 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
| 3362 | struct dpkg_profile_cfg cls_cfg; |
Ioana Ciocoi Radulescu | edad8d2 | 2018-09-24 15:36:21 +0000 | [diff] [blame] | 3363 | u32 rx_hash_fields = 0; |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3364 | dma_addr_t key_iova; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3365 | u8 *dma_mem; |
| 3366 | int i; |
| 3367 | int err = 0; |
| 3368 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3369 | memset(&cls_cfg, 0, sizeof(cls_cfg)); |
| 3370 | |
Ioana Radulescu | f76c483 | 2018-10-01 13:44:56 +0300 | [diff] [blame] | 3371 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3372 | struct dpkg_extract *key = |
| 3373 | &cls_cfg.extracts[cls_cfg.num_extracts]; |
| 3374 | |
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3375 | /* For both Rx hashing and classification keys |
| 3376 | * we set only the selected fields. |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3377 | */ |
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3378 | if (!(flags & dist_fields[i].id)) |
| 3379 | continue; |
| 3380 | if (type == DPAA2_ETH_RX_DIST_HASH) |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3381 | rx_hash_fields |= dist_fields[i].rxnfc_field; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3382 | |
| 3383 | if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { |
| 3384 | dev_err(dev, "error adding key extraction rule, too many rules?\n"); |
| 3385 | return -E2BIG; |
| 3386 | } |
| 3387 | |
| 3388 | key->type = DPKG_EXTRACT_FROM_HDR; |
Ioana Radulescu | f76c483 | 2018-10-01 13:44:56 +0300 | [diff] [blame] | 3389 | key->extract.from_hdr.prot = dist_fields[i].cls_prot; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3390 | key->extract.from_hdr.type = DPKG_FULL_FIELD; |
Ioana Radulescu | f76c483 | 2018-10-01 13:44:56 +0300 | [diff] [blame] | 3391 | key->extract.from_hdr.field = dist_fields[i].cls_field; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3392 | cls_cfg.num_extracts++; |
| 3393 | } |
| 3394 | |
Ioana Radulescu | e40ef9e | 2017-06-06 10:00:30 -0500 | [diff] [blame] | 3395 | dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3396 | if (!dma_mem) |
| 3397 | return -ENOMEM; |
| 3398 | |
| 3399 | err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); |
| 3400 | if (err) { |
Ioana Radulescu | 77160af | 2017-06-06 10:00:28 -0500 | [diff] [blame] | 3401 | dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3402 | goto free_key; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3403 | } |
| 3404 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3405 | /* Prepare for setting the rx dist */ |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3406 | key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, |
| 3407 | DMA_TO_DEVICE); |
| 3408 | if (dma_mapping_error(dev, key_iova)) { |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3409 | dev_err(dev, "DMA mapping failed\n"); |
| 3410 | err = -ENOMEM; |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3411 | goto free_key; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3412 | } |
| 3413 | |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3414 | if (type == DPAA2_ETH_RX_DIST_HASH) { |
| 3415 | if (dpaa2_eth_has_legacy_dist(priv)) |
| 3416 | err = config_legacy_hash_key(priv, key_iova); |
| 3417 | else |
| 3418 | err = config_hash_key(priv, key_iova); |
| 3419 | } else { |
| 3420 | err = config_cls_key(priv, key_iova); |
| 3421 | } |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3422 | |
| 3423 | dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, |
| 3424 | DMA_TO_DEVICE); |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3425 | if (!err && type == DPAA2_ETH_RX_DIST_HASH) |
Ioana Ciocoi Radulescu | edad8d2 | 2018-09-24 15:36:21 +0000 | [diff] [blame] | 3426 | priv->rx_hash_fields = rx_hash_fields; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3427 | |
Ioana Radulescu | df85aeb | 2018-10-01 13:44:55 +0300 | [diff] [blame] | 3428 | free_key: |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3429 | kfree(dma_mem); |
| 3430 | return err; |
| 3431 | } |
| 3432 | |
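|      | /* Set the Rx hash key based on the RXH_ flags received from ethtool,
|      |  * translated into the corresponding DPAA2_ETH_DIST_ field ids
|      |  */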
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3433 | int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) |
| 3434 | { |
| 3435 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3436 | u64 key = 0; |
| 3437 | int i; |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3438 | |
| 3439 | if (!dpaa2_eth_hash_enabled(priv)) |
| 3440 | return -EOPNOTSUPP; |
| 3441 | |
Ioana Ciocoi Radulescu | 3a1e6b8 | 2019-04-16 17:13:29 +0000 | [diff] [blame] | 3442 | for (i = 0; i < ARRAY_SIZE(dist_fields); i++) |
| 3443 | if (dist_fields[i].rxnfc_field & flags) |
| 3444 | key |= dist_fields[i].id; |
| 3445 | |
| 3446 | return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key); |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3447 | } |
| 3448 | |
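|      | /* Set the Rx flow classification key for the given set of fields */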
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3449 | int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags) |
| 3450 | { |
| 3451 | return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags); |
| 3452 | } |
| 3453 | |
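|      | /* Program a default Rx classification key covering all supported header
|      |  * fields; when the hardware lacks masking support, no default key is set
|      |  * and the key is built later from the rules added at runtime
|      |  */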
| 3454 | static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv) |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3455 | { |
| 3456 | struct device *dev = priv->net_dev->dev.parent; |
Ioana Ciocoi Radulescu | df8e249 | 2019-04-16 17:13:28 +0000 | [diff] [blame] | 3457 | int err; |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3458 | |
| 3459 | /* Check if we actually support Rx flow classification */ |
| 3460 | if (dpaa2_eth_has_legacy_dist(priv)) { |
| 3461 | dev_dbg(dev, "Rx cls not supported by current MC version\n"); |
| 3462 | return -EOPNOTSUPP; |
| 3463 | } |
| 3464 | |
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3465 | if (!dpaa2_eth_fs_enabled(priv)) { |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3466 | dev_dbg(dev, "Rx cls disabled in DPNI options\n"); |
| 3467 | return -EOPNOTSUPP; |
| 3468 | } |
| 3469 | |
| 3470 | if (!dpaa2_eth_hash_enabled(priv)) { |
| 3471 | dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n"); |
| 3472 | return -EOPNOTSUPP; |
| 3473 | } |
| 3474 | |
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3475 | /* If there is no support for masking in the classification table, |
| 3476 | * we don't set a default key, as it will depend on the rules |
| 3477 | * added by the user at runtime. |
| 3478 | */ |
| 3479 | if (!dpaa2_eth_fs_mask_enabled(priv)) |
| 3480 | goto out; |
| 3481 | |
| 3482 | err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL); |
Ioana Ciocoi Radulescu | df8e249 | 2019-04-16 17:13:28 +0000 | [diff] [blame] | 3483 | if (err) |
| 3484 | return err; |
| 3485 | |
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3486 | out: |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3487 | priv->rx_cls_enabled = 1; |
| 3488 | |
Ioana Ciocoi Radulescu | df8e249 | 2019-04-16 17:13:28 +0000 | [diff] [blame] | 3489 | return 0; |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3490 | } |
| 3491 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3492 | /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, |
| 3493 | * frame queues and channels |
| 3494 | */ |
| 3495 | static int bind_dpni(struct dpaa2_eth_priv *priv) |
| 3496 | { |
| 3497 | struct net_device *net_dev = priv->net_dev; |
| 3498 | struct device *dev = net_dev->dev.parent; |
| 3499 | struct dpni_pools_cfg pools_params; |
| 3500 | struct dpni_error_cfg err_cfg; |
| 3501 | int err = 0; |
| 3502 | int i; |
| 3503 | |
| 3504 | pools_params.num_dpbp = 1; |
| 3505 | pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; |
| 3506 | pools_params.pools[0].backup_pool = 0; |
Ioana Ciornei | efa6a7d | 2020-05-15 15:30:22 +0300 | [diff] [blame] | 3507 | pools_params.pools[0].buffer_size = priv->rx_buf_size; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3508 | err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); |
| 3509 | if (err) { |
| 3510 | dev_err(dev, "dpni_set_pools() failed\n"); |
| 3511 | return err; |
| 3512 | } |
| 3513 | |
Ioana Radulescu | 227686b | 2018-07-27 09:12:59 -0500 | [diff] [blame] | 3514 | /* have the interface implicitly distribute traffic based on |
| 3515 | * the default hash key |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3516 | */ |
Ioana Radulescu | 227686b | 2018-07-27 09:12:59 -0500 | [diff] [blame] | 3517 | err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); |
Ioana Ciocoi Radulescu | edad8d2 | 2018-09-24 15:36:21 +0000 | [diff] [blame] | 3518 | if (err && err != -EOPNOTSUPP) |
Ioana Radulescu | 0f4c295 | 2017-10-11 08:29:50 -0500 | [diff] [blame] | 3519 | dev_err(dev, "Failed to configure hashing\n"); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3520 | |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3521 | /* Configure the flow classification key; it includes all |
| 3522 | * supported header fields and cannot be modified at runtime |
| 3523 | */ |
Ioana Ciocoi Radulescu | 2d68023 | 2019-04-16 17:13:30 +0000 | [diff] [blame] | 3524 | err = dpaa2_eth_set_default_cls(priv); |
Ioana Radulescu | 4aaaf9b | 2018-10-01 13:44:57 +0300 | [diff] [blame] | 3525 | if (err && err != -EOPNOTSUPP) |
| 3526 | dev_err(dev, "Failed to configure Rx classification key\n"); |
| 3527 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3528 | /* Configure handling of error frames */ |
Ioana Radulescu | 39163c0 | 2017-06-06 10:00:39 -0500 | [diff] [blame] | 3529 | err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3530 | err_cfg.set_frame_annotation = 1; |
| 3531 | err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; |
| 3532 | err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, |
| 3533 | &err_cfg); |
| 3534 | if (err) { |
| 3535 | dev_err(dev, "dpni_set_errors_behavior failed\n"); |
| 3536 | return err; |
| 3537 | } |
| 3538 | |
| 3539 | /* Configure Rx and Tx conf queues to generate CDANs */ |
| 3540 | for (i = 0; i < priv->num_fqs; i++) { |
| 3541 | switch (priv->fq[i].type) { |
| 3542 | case DPAA2_RX_FQ: |
| 3543 | err = setup_rx_flow(priv, &priv->fq[i]); |
| 3544 | break; |
| 3545 | case DPAA2_TX_CONF_FQ: |
| 3546 | err = setup_tx_flow(priv, &priv->fq[i]); |
| 3547 | break; |
| 3548 | default: |
| 3549 | dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); |
| 3550 | return -EINVAL; |
| 3551 | } |
| 3552 | if (err) |
| 3553 | return err; |
| 3554 | } |
| 3555 | |
| 3556 | err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, |
| 3557 | DPNI_QUEUE_TX, &priv->tx_qdid); |
| 3558 | if (err) { |
| 3559 | dev_err(dev, "dpni_get_qdid() failed\n"); |
| 3560 | return err; |
| 3561 | } |
| 3562 | |
| 3563 | return 0; |
| 3564 | } |
| 3565 | |
| 3566 | /* Allocate rings for storing incoming frame descriptors */ |
| 3567 | static int alloc_rings(struct dpaa2_eth_priv *priv) |
| 3568 | { |
| 3569 | struct net_device *net_dev = priv->net_dev; |
| 3570 | struct device *dev = net_dev->dev.parent; |
| 3571 | int i; |
| 3572 | |
| 3573 | for (i = 0; i < priv->num_channels; i++) { |
| 3574 | priv->channel[i]->store = |
| 3575 | dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); |
| 3576 | if (!priv->channel[i]->store) { |
| 3577 | netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); |
| 3578 | goto err_ring; |
| 3579 | } |
| 3580 | } |
| 3581 | |
| 3582 | return 0; |
| 3583 | |
| 3584 | err_ring: |
| 3585 | for (i = 0; i < priv->num_channels; i++) { |
| 3586 | if (!priv->channel[i]->store) |
| 3587 | break; |
| 3588 | dpaa2_io_store_destroy(priv->channel[i]->store); |
| 3589 | } |
| 3590 | |
| 3591 | return -ENOMEM; |
| 3592 | } |
| 3593 | |
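|      | /* Free the frame descriptor stores allocated by alloc_rings() */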
| 3594 | static void free_rings(struct dpaa2_eth_priv *priv) |
| 3595 | { |
| 3596 | int i; |
| 3597 | |
| 3598 | for (i = 0; i < priv->num_channels; i++) |
| 3599 | dpaa2_io_store_destroy(priv->channel[i]->store); |
| 3600 | } |
| 3601 | |
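|      | /* Select the interface MAC address: prefer the port address provided by
|      |  * firmware, then the one already set on the DPNI, and fall back to a
|      |  * randomly generated address if both are zero
|      |  */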
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3602 | static int set_mac_addr(struct dpaa2_eth_priv *priv) |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3603 | { |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3604 | struct net_device *net_dev = priv->net_dev; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3605 | struct device *dev = net_dev->dev.parent; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3606 | u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN]; |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3607 | int err; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3608 | |
| 3609 | /* Get firmware address, if any */ |
| 3610 | err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr); |
| 3611 | if (err) { |
| 3612 | dev_err(dev, "dpni_get_port_mac_addr() failed\n"); |
| 3613 | return err; |
| 3614 | } |
| 3615 | |
| 3616 | /* Get DPNI attributes address, if any */ |
| 3617 | err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, |
| 3618 | dpni_mac_addr); |
| 3619 | if (err) { |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3620 | dev_err(dev, "dpni_get_primary_mac_addr() failed\n"); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3621 | return err; |
| 3622 | } |
| 3623 | |
| 3624 | /* First check if firmware has any address configured by bootloader */ |
| 3625 | if (!is_zero_ether_addr(mac_addr)) { |
| 3626 | /* If the DPMAC addr != DPNI addr, update it */ |
| 3627 | if (!ether_addr_equal(mac_addr, dpni_mac_addr)) { |
| 3628 | err = dpni_set_primary_mac_addr(priv->mc_io, 0, |
| 3629 | priv->mc_token, |
| 3630 | mac_addr); |
| 3631 | if (err) { |
| 3632 | dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); |
| 3633 | return err; |
| 3634 | } |
| 3635 | } |
| 3636 | memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); |
| 3637 | } else if (is_zero_ether_addr(dpni_mac_addr)) { |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3638 | /* No MAC address configured, fill in net_dev->dev_addr |
| 3639 | * with a random one |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3640 | */ |
| 3641 | eth_hw_addr_random(net_dev); |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3642 | dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); |
| 3643 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3644 | err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, |
| 3645 | net_dev->dev_addr); |
| 3646 | if (err) { |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3647 | dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3648 | return err; |
| 3649 | } |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3650 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3651 | /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all |
| 3652 | * practical purposes, this will be our "permanent" mac address, |
| 3653 | * at least until the next reboot. This move will also permit |
| 3654 | * register_netdevice() to properly fill up net_dev->perm_addr. |
| 3655 | */ |
| 3656 | net_dev->addr_assign_type = NET_ADDR_PERM; |
| 3657 | } else { |
| 3658 | /* NET_ADDR_PERM is default, all we have to do is |
| 3659 | * fill in the device addr. |
| 3660 | */ |
| 3661 | memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); |
| 3662 | } |
| 3663 | |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3664 | return 0; |
| 3665 | } |
| 3666 | |
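|      | /* One-time net device setup: callbacks, MAC address, broadcast filter
|      |  * entry, MTU limit, queue counts and feature flags
|      |  */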
| 3667 | static int netdev_init(struct net_device *net_dev) |
| 3668 | { |
| 3669 | struct device *dev = net_dev->dev.parent; |
| 3670 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
Ioana Radulescu | 7f12c8a3 | 2018-08-29 04:42:39 -0500 | [diff] [blame] | 3671 | u32 options = priv->dpni_attrs.options; |
| 3672 | u64 supported = 0, not_supported = 0; |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3673 | u8 bcast_addr[ETH_ALEN]; |
Ioana Radulescu | bb5b42c | 2017-06-06 10:00:41 -0500 | [diff] [blame] | 3674 | u8 num_queues; |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3675 | int err; |
| 3676 | |
| 3677 | net_dev->netdev_ops = &dpaa2_eth_ops; |
Ioana Radulescu | 7f12c8a3 | 2018-08-29 04:42:39 -0500 | [diff] [blame] | 3678 | net_dev->ethtool_ops = &dpaa2_ethtool_ops; |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3679 | |
| 3680 | err = set_mac_addr(priv); |
| 3681 | if (err) |
| 3682 | return err; |
| 3683 | |
| 3684 | /* Explicitly add the broadcast address to the MAC filtering table */ |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3685 | eth_broadcast_addr(bcast_addr); |
| 3686 | err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); |
| 3687 | if (err) { |
Ioana Radulescu | 6ab0086 | 2017-06-06 10:00:40 -0500 | [diff] [blame] | 3688 | dev_err(dev, "dpni_add_mac_addr() failed\n"); |
| 3689 | return err; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3690 | } |
| 3691 | |
Ioana Radulescu | 3ccc8d4 | 2018-07-09 10:01:10 -0500 | [diff] [blame] | 3692 | /* Set MTU upper limit; lower limit is 68B (default value) */ |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3693 | net_dev->max_mtu = DPAA2_ETH_MAX_MTU; |
Ioana Radulescu | 00fee00 | 2018-07-09 10:01:11 -0500 | [diff] [blame] | 3694 | err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, |
Ioana Radulescu | 81f34e9 | 2018-07-12 12:12:29 -0500 | [diff] [blame] | 3695 | DPAA2_ETH_MFL); |
Ioana Radulescu | 00fee00 | 2018-07-09 10:01:11 -0500 | [diff] [blame] | 3696 | if (err) { |
| 3697 | dev_err(dev, "dpni_set_max_frame_length() failed\n"); |
| 3698 | return err; |
| 3699 | } |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3700 | |
Ioana Radulescu | bb5b42c | 2017-06-06 10:00:41 -0500 | [diff] [blame] | 3701 | /* Set actual number of queues in the net device */ |
| 3702 | num_queues = dpaa2_eth_queue_count(priv); |
| 3703 | err = netif_set_real_num_tx_queues(net_dev, num_queues); |
| 3704 | if (err) { |
| 3705 | dev_err(dev, "netif_set_real_num_tx_queues() failed\n"); |
| 3706 | return err; |
| 3707 | } |
| 3708 | err = netif_set_real_num_rx_queues(net_dev, num_queues); |
| 3709 | if (err) { |
| 3710 | dev_err(dev, "netif_set_real_num_rx_queues() failed\n"); |
| 3711 | return err; |
| 3712 | } |
| 3713 | |
Ioana Radulescu | 7f12c8a3 | 2018-08-29 04:42:39 -0500 | [diff] [blame] | 3714 | /* Capabilities listing */ |
| 3715 | supported |= IFF_LIVE_ADDR_CHANGE; |
| 3716 | |
| 3717 | if (options & DPNI_OPT_NO_MAC_FILTER) |
| 3718 | not_supported |= IFF_UNICAST_FLT; |
| 3719 | else |
| 3720 | supported |= IFF_UNICAST_FLT; |
| 3721 | |
| 3722 | net_dev->priv_flags |= supported; |
| 3723 | net_dev->priv_flags &= ~not_supported; |
| 3724 | |
| 3725 | /* Features */ |
| 3726 | net_dev->features = NETIF_F_RXCSUM | |
| 3727 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
| 3728 | NETIF_F_SG | NETIF_F_HIGHDMA | |
| 3729 | NETIF_F_LLTX; |
| 3730 | net_dev->hw_features = net_dev->features; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3731 | |
| 3732 | return 0; |
| 3733 | } |
| 3734 | |
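|      | /* Fallback link state polling thread, used when the DPNI interrupt
|      |  * could not be set up
|      |  */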
| 3735 | static int poll_link_state(void *arg) |
| 3736 | { |
| 3737 | struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; |
| 3738 | int err; |
| 3739 | |
| 3740 | while (!kthread_should_stop()) { |
| 3741 | err = link_state_update(priv); |
| 3742 | if (unlikely(err)) |
| 3743 | return err; |
| 3744 | |
| 3745 | msleep(DPAA2_ETH_LINK_STATE_REFRESH); |
| 3746 | } |
| 3747 | |
| 3748 | return 0; |
| 3749 | } |
| 3750 | |
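|      | /* If the DPNI endpoint is a (non-fixed) DPMAC object, allocate a
|      |  * dpaa2_mac instance and connect to it
|      |  */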
Ioana Ciornei | 7194792 | 2019-10-31 01:18:31 +0200 | [diff] [blame] | 3751 | static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) |
| 3752 | { |
| 3753 | struct fsl_mc_device *dpni_dev, *dpmac_dev; |
| 3754 | struct dpaa2_mac *mac; |
| 3755 | int err; |
| 3756 | |
| 3757 | dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); |
| 3758 | dpmac_dev = fsl_mc_get_endpoint(dpni_dev); |
| 3759 | if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) |
| 3760 | return 0; |
| 3761 | |
| 3762 | if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io)) |
| 3763 | return 0; |
| 3764 | |
| 3765 | mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL); |
| 3766 | if (!mac) |
| 3767 | return -ENOMEM; |
| 3768 | |
| 3769 | mac->mc_dev = dpmac_dev; |
| 3770 | mac->mc_io = priv->mc_io; |
| 3771 | mac->net_dev = priv->net_dev; |
| 3772 | |
| 3773 | err = dpaa2_mac_connect(mac); |
| 3774 | if (err) { |
| 3775 | netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n"); |
| 3776 | kfree(mac); |
| 3777 | return err; |
| 3778 | } |
| 3779 | priv->mac = mac; |
| 3780 | |
| 3781 | return 0; |
| 3782 | } |
| 3783 | |
| 3784 | static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv) |
| 3785 | { |
| 3786 | if (!priv->mac) |
| 3787 | return; |
| 3788 | |
| 3789 | dpaa2_mac_disconnect(priv->mac); |
| 3790 | kfree(priv->mac); |
| 3791 | priv->mac = NULL; |
| 3792 | } |
| 3793 | |
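|      | /* Threaded handler for the DPNI interrupt: reacts to link state and
|      |  * endpoint change events
|      |  */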
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3794 | static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) |
| 3795 | { |
Ioana Radulescu | 112197d | 2017-10-11 08:29:49 -0500 | [diff] [blame] | 3796 | u32 status = ~0; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3797 | struct device *dev = (struct device *)arg; |
| 3798 | struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); |
| 3799 | struct net_device *net_dev = dev_get_drvdata(dev); |
Ioana Ciornei | 7194792 | 2019-10-31 01:18:31 +0200 | [diff] [blame] | 3800 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3801 | int err; |
| 3802 | |
| 3803 | err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, |
| 3804 | DPNI_IRQ_INDEX, &status); |
| 3805 | if (unlikely(err)) { |
Ioana Radulescu | 77160af | 2017-06-06 10:00:28 -0500 | [diff] [blame] | 3806 | netdev_err(net_dev, "Can't get irq status (err %d)\n", err); |
Ioana Radulescu | 112197d | 2017-10-11 08:29:49 -0500 | [diff] [blame] | 3807 | return IRQ_HANDLED; |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3808 | } |
| 3809 | |
Ioana Radulescu | 112197d | 2017-10-11 08:29:49 -0500 | [diff] [blame] | 3810 | if (status & DPNI_IRQ_EVENT_LINK_CHANGED) |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3811 | link_state_update(netdev_priv(net_dev)); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3812 | |
Ioana Ciornei | f5c3fff | 2019-10-31 01:18:30 +0200 | [diff] [blame] | 3813 | if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) { |
Florin Chiculita | 8398b37 | 2019-10-16 10:36:22 +0300 | [diff] [blame] | 3814 | set_mac_addr(netdev_priv(net_dev)); |
Ioana Ciornei | f5c3fff | 2019-10-31 01:18:30 +0200 | [diff] [blame] | 3815 | update_tx_fqids(priv); |
Ioana Ciornei | 7194792 | 2019-10-31 01:18:31 +0200 | [diff] [blame] | 3816 | |
| 3817 | rtnl_lock(); |
| 3818 | if (priv->mac) |
| 3819 | dpaa2_eth_disconnect_mac(priv); |
| 3820 | else |
| 3821 | dpaa2_eth_connect_mac(priv); |
| 3822 | rtnl_unlock(); |
Ioana Ciornei | f5c3fff | 2019-10-31 01:18:30 +0200 | [diff] [blame] | 3823 | } |
Florin Chiculita | 8398b37 | 2019-10-16 10:36:22 +0300 | [diff] [blame] | 3824 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3825 | return IRQ_HANDLED; |
| 3826 | } |
| 3827 | |
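|      | /* Allocate the MC interrupt for the DPNI, register its threaded handler
|      |  * and enable link state / endpoint change events
|      |  */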
| 3828 | static int setup_irqs(struct fsl_mc_device *ls_dev) |
| 3829 | { |
| 3830 | int err = 0; |
| 3831 | struct fsl_mc_device_irq *irq; |
| 3832 | |
| 3833 | err = fsl_mc_allocate_irqs(ls_dev); |
| 3834 | if (err) { |
| 3835 | dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); |
| 3836 | return err; |
| 3837 | } |
| 3838 | |
| 3839 | irq = ls_dev->irqs[0]; |
| 3840 | err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, |
Ioana Radulescu | fdc9b53 | 2018-03-23 08:44:05 -0500 | [diff] [blame] | 3841 | NULL, dpni_irq0_handler_thread, |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3842 | IRQF_NO_SUSPEND | IRQF_ONESHOT, |
| 3843 | dev_name(&ls_dev->dev), &ls_dev->dev); |
| 3844 | if (err < 0) { |
Ioana Radulescu | 77160af | 2017-06-06 10:00:28 -0500 | [diff] [blame] | 3845 | dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3846 | goto free_mc_irq; |
| 3847 | } |
| 3848 | |
| 3849 | err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, |
Florin Chiculita | 8398b37 | 2019-10-16 10:36:22 +0300 | [diff] [blame] | 3850 | DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED | |
| 3851 | DPNI_IRQ_EVENT_ENDPOINT_CHANGED); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3852 | if (err < 0) { |
Ioana Radulescu | 77160af | 2017-06-06 10:00:28 -0500 | [diff] [blame] | 3853 | dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3854 | goto free_irq; |
| 3855 | } |
| 3856 | |
| 3857 | err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, |
| 3858 | DPNI_IRQ_INDEX, 1); |
| 3859 | if (err < 0) { |
Ioana Radulescu | 77160af | 2017-06-06 10:00:28 -0500 | [diff] [blame] | 3860 | dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3861 | goto free_irq; |
| 3862 | } |
| 3863 | |
| 3864 | return 0; |
| 3865 | |
| 3866 | free_irq: |
| 3867 | devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); |
| 3868 | free_mc_irq: |
| 3869 | fsl_mc_free_irqs(ls_dev); |
| 3870 | |
| 3871 | return err; |
| 3872 | } |
| 3873 | |
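|      | /* Register one NAPI instance per channel */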
| 3874 | static void add_ch_napi(struct dpaa2_eth_priv *priv) |
| 3875 | { |
| 3876 | int i; |
| 3877 | struct dpaa2_eth_channel *ch; |
| 3878 | |
| 3879 | for (i = 0; i < priv->num_channels; i++) { |
| 3880 | ch = priv->channel[i]; |
| 3881 | /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ |
| 3882 | netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, |
| 3883 | NAPI_POLL_WEIGHT); |
| 3884 | } |
| 3885 | } |
| 3886 | |
| 3887 | static void del_ch_napi(struct dpaa2_eth_priv *priv) |
| 3888 | { |
| 3889 | int i; |
| 3890 | struct dpaa2_eth_channel *ch; |
| 3891 | |
| 3892 | for (i = 0; i < priv->num_channels; i++) { |
| 3893 | ch = priv->channel[i]; |
| 3894 | netif_napi_del(&ch->napi); |
| 3895 | } |
| 3896 | } |
| 3897 | |
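|      | /* Probe one DPNI object: allocate the net device, set up the MC objects
|      |  * (DPNI, DPIO, DPBP), bind queues and channels, then register the netdev
|      |  */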
| 3898 | static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) |
| 3899 | { |
| 3900 | struct device *dev; |
| 3901 | struct net_device *net_dev = NULL; |
| 3902 | struct dpaa2_eth_priv *priv = NULL; |
| 3903 | int err = 0; |
| 3904 | |
| 3905 | dev = &dpni_dev->dev; |
| 3906 | |
| 3907 | /* Net device */ |
Ioana Radulescu | ab1e6de | 2019-06-11 14:50:03 +0300 | [diff] [blame] | 3908 | net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3909 | if (!net_dev) { |
| 3910 | dev_err(dev, "alloc_etherdev_mq() failed\n"); |
| 3911 | return -ENOMEM; |
| 3912 | } |
| 3913 | |
| 3914 | SET_NETDEV_DEV(net_dev, dev); |
| 3915 | dev_set_drvdata(dev, net_dev); |
| 3916 | |
| 3917 | priv = netdev_priv(net_dev); |
| 3918 | priv->net_dev = net_dev; |
| 3919 | |
Ioana Radulescu | 08eb239 | 2017-05-24 07:13:27 -0500 | [diff] [blame] | 3920 | priv->iommu_domain = iommu_get_domain_for_dev(dev); |
| 3921 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3922 | /* Obtain a MC portal */ |
| 3923 | err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, |
| 3924 | &priv->mc_io); |
| 3925 | if (err) { |
Ioana Radulescu | 8c36961 | 2018-03-20 07:04:46 -0500 | [diff] [blame] | 3926 | if (err == -ENXIO) |
| 3927 | err = -EPROBE_DEFER; |
| 3928 | else |
| 3929 | dev_err(dev, "MC portal allocation failed\n"); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3930 | goto err_portal_alloc; |
| 3931 | } |
| 3932 | |
| 3933 | /* MC objects initialization and configuration */ |
| 3934 | err = setup_dpni(dpni_dev); |
| 3935 | if (err) |
| 3936 | goto err_dpni_setup; |
| 3937 | |
| 3938 | err = setup_dpio(priv); |
| 3939 | if (err) |
| 3940 | goto err_dpio_setup; |
| 3941 | |
| 3942 | setup_fqs(priv); |
| 3943 | |
| 3944 | err = setup_dpbp(priv); |
| 3945 | if (err) |
| 3946 | goto err_dpbp_setup; |
| 3947 | |
| 3948 | err = bind_dpni(priv); |
| 3949 | if (err) |
| 3950 | goto err_bind; |
| 3951 | |
| 3952 | /* Add a NAPI context for each channel */ |
| 3953 | add_ch_napi(priv); |
| 3954 | |
| 3955 | /* Percpu statistics */ |
| 3956 | priv->percpu_stats = alloc_percpu(*priv->percpu_stats); |
| 3957 | if (!priv->percpu_stats) { |
| 3958 | dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); |
| 3959 | err = -ENOMEM; |
| 3960 | goto err_alloc_percpu_stats; |
| 3961 | } |
Ioana Radulescu | 85047ab | 2017-04-28 04:50:31 -0500 | [diff] [blame] | 3962 | priv->percpu_extras = alloc_percpu(*priv->percpu_extras); |
| 3963 | if (!priv->percpu_extras) { |
| 3964 | dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); |
| 3965 | err = -ENOMEM; |
| 3966 | goto err_alloc_percpu_extras; |
| 3967 | } |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3968 | |
Ioana Ciornei | d70446e | 2020-06-29 21:47:11 +0300 | [diff] [blame] | 3969 | priv->sgt_cache = alloc_percpu(*priv->sgt_cache); |
| 3970 | if (!priv->sgt_cache) { |
| 3971 | dev_err(dev, "alloc_percpu(sgt_cache) failed\n"); |
| 3972 | err = -ENOMEM; |
| 3973 | goto err_alloc_sgt_cache; |
| 3974 | } |
| 3975 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 3976 | err = netdev_init(net_dev); |
| 3977 | if (err) |
| 3978 | goto err_netdev_init; |
| 3979 | |
| 3980 | /* Configure checksum offload based on current interface flags */ |
| 3981 | err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); |
| 3982 | if (err) |
| 3983 | goto err_csum; |
| 3984 | |
| 3985 | err = set_tx_csum(priv, !!(net_dev->features & |
| 3986 | (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); |
| 3987 | if (err) |
| 3988 | goto err_csum; |
| 3989 | |
| 3990 | err = alloc_rings(priv); |
| 3991 | if (err) |
| 3992 | goto err_alloc_rings; |
| 3993 | |
Ioana Ciornei | f395b69 | 2020-05-31 00:08:13 +0300 | [diff] [blame] | 3994 | #ifdef CONFIG_FSL_DPAA2_ETH_DCB |
| 3995 | if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) { |
| 3996 | priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; |
| 3997 | net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops; |
| 3998 | } else { |
| 3999 | dev_dbg(dev, "PFC not supported\n"); |
| 4000 | } |
| 4001 | #endif |
| 4002 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4003 | err = setup_irqs(dpni_dev); |
| 4004 | if (err) { |
| 4005 | netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); |
| 4006 | priv->poll_thread = kthread_run(poll_link_state, priv, |
| 4007 | "%s_poll_link", net_dev->name); |
| 4008 | if (IS_ERR(priv->poll_thread)) { |
Ioana Radulescu | 7f12c8a3 | 2018-08-29 04:42:39 -0500 | [diff] [blame] | 4009 | dev_err(dev, "Error starting polling thread\n"); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4010 | goto err_poll_thread; |
| 4011 | } |
| 4012 | priv->do_link_poll = true; |
| 4013 | } |
| 4014 | |
Ioana Ciornei | 7194792 | 2019-10-31 01:18:31 +0200 | [diff] [blame] | 4015 | err = dpaa2_eth_connect_mac(priv); |
| 4016 | if (err) |
| 4017 | goto err_connect_mac; |
| 4018 | |
Ioana Radulescu | 7f12c8a3 | 2018-08-29 04:42:39 -0500 | [diff] [blame] | 4019 | err = register_netdev(net_dev); |
| 4020 | if (err < 0) { |
| 4021 | dev_err(dev, "register_netdev() failed\n"); |
| 4022 | goto err_netdev_reg; |
| 4023 | } |
| 4024 | |
Ioana Radulescu | 091a19e | 2019-01-18 16:16:00 +0000 | [diff] [blame] | 4025 | #ifdef CONFIG_DEBUG_FS |
| 4026 | dpaa2_dbg_add(priv); |
| 4027 | #endif |
| 4028 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4029 | dev_info(dev, "Probed interface %s\n", net_dev->name); |
| 4030 | return 0; |
| 4031 | |
Ioana Radulescu | 7f12c8a3 | 2018-08-29 04:42:39 -0500 | [diff] [blame] | 4032 | err_netdev_reg: |
Ioana Ciornei | 7194792 | 2019-10-31 01:18:31 +0200 | [diff] [blame] | 4033 | dpaa2_eth_disconnect_mac(priv); |
| 4034 | err_connect_mac: |
Ioana Radulescu | 7f12c8a3 | 2018-08-29 04:42:39 -0500 | [diff] [blame] | 4035 | if (priv->do_link_poll) |
| 4036 | kthread_stop(priv->poll_thread); |
| 4037 | else |
| 4038 | fsl_mc_free_irqs(dpni_dev); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4039 | err_poll_thread: |
| 4040 | free_rings(priv); |
| 4041 | err_alloc_rings: |
| 4042 | err_csum: |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4043 | err_netdev_init: |
Ioana Ciornei | d70446e | 2020-06-29 21:47:11 +0300 | [diff] [blame] | 4044 | free_percpu(priv->sgt_cache); |
| 4045 | err_alloc_sgt_cache: |
Ioana Radulescu | 85047ab | 2017-04-28 04:50:31 -0500 | [diff] [blame] | 4046 | free_percpu(priv->percpu_extras); |
| 4047 | err_alloc_percpu_extras: |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4048 | free_percpu(priv->percpu_stats); |
| 4049 | err_alloc_percpu_stats: |
| 4050 | del_ch_napi(priv); |
| 4051 | err_bind: |
| 4052 | free_dpbp(priv); |
| 4053 | err_dpbp_setup: |
| 4054 | free_dpio(priv); |
| 4055 | err_dpio_setup: |
| 4056 | free_dpni(priv); |
| 4057 | err_dpni_setup: |
| 4058 | fsl_mc_portal_free(priv->mc_io); |
| 4059 | err_portal_alloc: |
| 4060 | dev_set_drvdata(dev, NULL); |
| 4061 | free_netdev(net_dev); |
| 4062 | |
| 4063 | return err; |
| 4064 | } |
| 4065 | |
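|      | /* Undo the setup performed in dpaa2_eth_probe() and free all resources */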
| 4066 | static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) |
| 4067 | { |
| 4068 | struct device *dev; |
| 4069 | struct net_device *net_dev; |
| 4070 | struct dpaa2_eth_priv *priv; |
| 4071 | |
| 4072 | dev = &ls_dev->dev; |
| 4073 | net_dev = dev_get_drvdata(dev); |
| 4074 | priv = netdev_priv(net_dev); |
| 4075 | |
Ioana Radulescu | 091a19e | 2019-01-18 16:16:00 +0000 | [diff] [blame] | 4076 | #ifdef CONFIG_DEBUG_FS |
| 4077 | dpaa2_dbg_remove(priv); |
| 4078 | #endif |
Ioana Ciornei | 7194792 | 2019-10-31 01:18:31 +0200 | [diff] [blame] | 4079 | rtnl_lock(); |
| 4080 | dpaa2_eth_disconnect_mac(priv); |
| 4081 | rtnl_unlock(); |
| 4082 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4083 | unregister_netdev(net_dev); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4084 | |
| 4085 | if (priv->do_link_poll) |
| 4086 | kthread_stop(priv->poll_thread); |
| 4087 | else |
| 4088 | fsl_mc_free_irqs(ls_dev); |
| 4089 | |
| 4090 | free_rings(priv); |
Ioana Ciornei | d70446e | 2020-06-29 21:47:11 +0300 | [diff] [blame] | 4091 | free_percpu(priv->sgt_cache); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4092 | free_percpu(priv->percpu_stats); |
Ioana Radulescu | 85047ab | 2017-04-28 04:50:31 -0500 | [diff] [blame] | 4093 | free_percpu(priv->percpu_extras); |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4094 | |
| 4095 | del_ch_napi(priv); |
| 4096 | free_dpbp(priv); |
| 4097 | free_dpio(priv); |
| 4098 | free_dpni(priv); |
| 4099 | |
| 4100 | fsl_mc_portal_free(priv->mc_io); |
| 4101 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4102 | free_netdev(net_dev); |
| 4103 | |
Ioana Radulescu | 4bc07aa | 2018-03-23 10:23:36 -0500 | [diff] [blame] | 4104 | dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); |
Ioana Radulescu | 7472dd9 | 2018-03-23 08:44:06 -0500 | [diff] [blame] | 4105 | |
Ioana Radulescu | 6e2387e | 2017-04-28 04:50:29 -0500 | [diff] [blame] | 4106 | return 0; |
| 4107 | } |
| 4108 | |
| 4109 | static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = { |
| 4110 | { |
| 4111 | .vendor = FSL_MC_VENDOR_FREESCALE, |
| 4112 | .obj_type = "dpni", |
| 4113 | }, |
| 4114 | { .vendor = 0x0 } |
| 4115 | }; |
| 4116 | MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); |
| 4117 | |
| 4118 | static struct fsl_mc_driver dpaa2_eth_driver = { |
| 4119 | .driver = { |
| 4120 | .name = KBUILD_MODNAME, |
| 4121 | .owner = THIS_MODULE, |
| 4122 | }, |
| 4123 | .probe = dpaa2_eth_probe, |
| 4124 | .remove = dpaa2_eth_remove, |
| 4125 | .match_id_table = dpaa2_eth_match_id_table |
| 4126 | }; |
| 4127 | |
Ioana Radulescu | 091a19e | 2019-01-18 16:16:00 +0000 | [diff] [blame] | 4128 | static int __init dpaa2_eth_driver_init(void) |
| 4129 | { |
| 4130 | int err; |
| 4131 | |
| 4132 | dpaa2_eth_dbg_init(); |
| 4133 | err = fsl_mc_driver_register(&dpaa2_eth_driver); |
| 4134 | if (err) { |
| 4135 | dpaa2_eth_dbg_exit(); |
| 4136 | return err; |
| 4137 | } |
| 4138 | |
| 4139 | return 0; |
| 4140 | } |
| 4141 | |
| 4142 | static void __exit dpaa2_eth_driver_exit(void) |
| 4143 | { |
| 4144 | dpaa2_eth_dbg_exit(); |
| 4145 | fsl_mc_driver_unregister(&dpaa2_eth_driver); |
| 4146 | } |
| 4147 | |
| 4148 | module_init(dpaa2_eth_driver_init); |
| 4149 | module_exit(dpaa2_eth_driver_exit); |