// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/swiotlb.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

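/* The umem pages are mapped into one contiguous kernel virtual area
 * with vmap(), so that a chunk address can be turned into a kernel
 * pointer with plain pointer arithmetic (see xp_raw_get_data()).
 */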
static void xp_addr_unmap(struct xsk_buff_pool *pool)
{
	vunmap(pool->addrs);
}

static int xp_addr_map(struct xsk_buff_pool *pool,
		       struct page **pages, u32 nr_pages)
{
	pool->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!pool->addrs)
		return -ENOMEM;
	return 0;
}

void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	xp_addr_unmap(pool);
	kvfree(pool->heads);
	kvfree(pool);
}

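/* Allocate a buffer pool for the given umem and tie it to socket @xs.
 * The pool takes over the fill and completion rings staged on the
 * socket (fq_tmp/cq_tmp) and pre-populates free_heads with one
 * xdp_buff_xsk per umem chunk. Returns NULL on allocation failure.
 */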
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	int err;
	u32 i;

	pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
			GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	pool->frame_len = umem->chunk_size - umem->headroom -
		XDP_PACKET_HEADROOM;
	pool->umem = umem;
	INIT_LIST_HEAD(&pool->free_list);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		pool->free_heads[i] = xskb;
	}

	err = xp_addr_map(pool, umem->pgs, umem->npgs);
	if (!err)
		return pool;

out:
	xp_destroy(pool);
	return NULL;
}

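/* Point every pre-allocated xdp_buff at the driver's rxq info, so the
 * buffers are valid xdp_buffs the moment they are handed out.
 */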
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

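/* Bind the pool to a netdev queue: register it at @queue_id and,
 * unless XDP_COPY was requested, try to enable zero-copy through the
 * driver's ndo_bpf(XDP_SETUP_XSK_POOL) hook. Without XDP_ZEROCOPY,
 * missing driver support silently falls back to copy mode. Called
 * with the RTNL lock held.
 */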
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
		  u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_NEED_WAKEUP) {
		pool->uses_need_wakeup = true;
		/* Tx needs to be explicitly woken up the first time.
		 * This also covers drivers that do not implement the
		 * need_wakeup feature; with those, userspace always
		 * has to kick Tx with sendto().
		 */
		pool->cached_need_wakeup = XDP_WAKEUP_TX;
	}

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!netdev->netdev_ops->ndo_bpf ||
	    !netdev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	pool->umem->zc = true;
	return 0;

err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err)
		xsk_clear_pool_at_qid(netdev, queue_id);
	return err;
}

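/* Undo xp_assign_dev(): disable zero-copy in the driver if it was
 * enabled, unregister the pool from the queue id, and drop the netdev
 * reference. Also called with the RTNL lock held.
 */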
void xp_clear_dev(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (!pool->netdev)
		return;

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}

	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	dev_put(pool->netdev);
	pool->netdev = NULL;
}

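/* Final teardown is deferred to a workqueue: xp_clear_dev() needs the
 * RTNL lock, which may not be safe to take in the context of the last
 * xp_put_pool() call.
 */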
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

void xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
	}
}

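/* Unmap all pages of the pool from the device. Entries already zeroed
 * (e.g. after a partial xp_dma_map() failure) are skipped.
 */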
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	if (pool->dma_pages_cnt == 0)
		return;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		dma = &pool->dma_pages[i];
		if (*dma) {
			dma_unmap_page_attrs(pool->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

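/* Mark each page whose DMA mapping is followed contiguously by that of
 * the next page. The flag is stashed in a low bit of the page-aligned
 * DMA address (XSK_NEXT_PG_CONTIG_MASK), which lets unaligned chunks
 * that straddle a page boundary be validated cheaply.
 */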
static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
{
	u32 i;

	for (i = 0; i < pool->dma_pages_cnt - 1; i++) {
		if (pool->dma_pages[i] + PAGE_SIZE == pool->dma_pages[i + 1])
			pool->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			pool->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

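/* DMA map all pages of the umem for the device and record whether any
 * mapping needs explicit syncing, so that the fast paths can skip the
 * sync calls entirely on cache-coherent hardware.
 */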
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	dma_addr_t dma;
	u32 i;

	pool->dma_pages = kvcalloc(nr_pages, sizeof(*pool->dma_pages),
				   GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dev;
	pool->dma_pages_cnt = nr_pages;
	pool->dma_need_sync = false;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			xp_dma_unmap(pool, attrs);
			return -ENOMEM;
		}
		if (dma_need_sync(dev, dma))
			pool->dma_need_sync = true;
		pool->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(pool);
	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

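/* Validate a chunk address taken from the fill ring: it must lie
 * inside the umem and, in unaligned mode, must not span two pages that
 * are not DMA-contiguous. Both helpers normalize *addr in place.
 */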
static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

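/* Slow path of buffer allocation: take a free buffer head and marry it
 * to the next valid address from the fill ring, dropping (and
 * counting) invalid descriptors along the way.
 */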
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	xskb = pool->free_heads[--pool->free_heads_cnt];

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			xp_release(xskb);
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}
	xskq_cons_release(pool->fq);

	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
	if (pool->dma_pages_cnt) {
		xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
				   ~XSK_NEXT_PG_CONTIG_MASK) +
				  (addr & ~PAGE_MASK);
		xskb->dma = xskb->frame_dma + pool->headroom +
			    XDP_PACKET_HEADROOM;
	}
	return xskb;
}

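/* Allocate one buffer for Rx. Recycled buffers on the free list are
 * preferred; only when that list is empty is the fill ring consulted
 * via __xp_alloc(). A sync for device is issued when the mapping needs
 * it, so the NIC can safely DMA into the frame.
 */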
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

void xp_free(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

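/* Translate a raw umem address (e.g. straight from a Tx descriptor)
 * into a kernel virtual address or a DMA address. In unaligned mode,
 * the offset encoded in the upper bits is folded in first.
 */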
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

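/* Explicit DMA syncs for pools whose mapping is not cache-coherent
 * (pool->dma_need_sync). The _slow suffix indicates these are the
 * out-of-line halves behind inline fast-path checks.
 */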
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len,
				      DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);