// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>

#include "xsk_queue.h"

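/* Store the umem geometry on a ring: the total size bounds the addresses
 * user space may pass in, and the chunk mask is used when checking that
 * descriptors stay within their chunk.
 */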
void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
{
	if (!q)
		return;

	q->size = size;
	q->chunk_mask = chunk_mask;
}

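/* The fill and completion rings of a umem carry plain 64-bit addresses,
 * while the Rx and Tx rings carry full xdp_desc entries, so the two ring
 * types need different amounts of memory for the same number of entries.
 */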
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u64);
}

static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc);
}

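/* Allocate a ring. Callers are expected to pass a power-of-two number of
 * entries, since the ring mask relies on it. The descriptor area is
 * allocated as contiguous pages so it can be mapped to user space in one
 * piece; __GFP_NOWARN and __GFP_NORETRY keep a large allocation that fails
 * from stressing the rest of the system.
 */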
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	struct xsk_queue *q;
	gfp_t gfp_flags;
	size_t size;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->nentries = nentries;
	q->ring_mask = nentries - 1;

	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
		    __GFP_COMP | __GFP_NORETRY;
	size = umem_queue ? xskq_umem_get_ring_size(q) :
	       xskq_rxtx_get_ring_size(q);

	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
						      get_order(size));
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	return q;
}

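/* Release the ring pages and the queue itself. Safe to call with NULL. */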
void xskq_destroy(struct xsk_queue *q)
{
	if (!q)
		return;

	page_frag_free(q->ring);
	kfree(q);
}

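/* Allocate a reuse queue that can park up to nentries fill-queue addresses.
 * nentries is rounded up to the next power of two, with an explicit check
 * that the rounding does not overflow u32. A zero-copy driver typically
 * prepares this queue up front and installs it with xsk_reuseq_swap() once
 * it is ready to take over the umem.
 */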
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	struct xdp_umem_fq_reuse *newq;

	/* Check that rounding up to a power of two does not overflow */
	if (nentries > (u32)roundup_pow_of_two(nentries))
		return NULL;
	nentries = roundup_pow_of_two(nentries);

	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
	if (!newq)
		return NULL;
	memset(newq, 0, offsetof(typeof(*newq), handles));

	newq->nentries = nentries;
	return newq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);

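/* Install newq as the umem's reuse queue and return the queue the caller
 * should free. If no old queue exists, newq is taken as is. If newq is too
 * small to hold the entries already collected in the old queue, newq is
 * rejected and handed back. Otherwise the collected handles are copied over
 * and the old queue is returned for freeing.
 */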
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq)
{
	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;

	if (!oldq) {
		umem->fq_reuse = newq;
		return NULL;
	}

	if (newq->nentries < oldq->length)
		return newq;

	memcpy(newq->handles, oldq->handles,
	       array_size(oldq->length, sizeof(u64)));
	newq->length = oldq->length;

	umem->fq_reuse = newq;
	return oldq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_swap);

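/* Free a reuse queue obtained from xsk_reuseq_prepare() or returned by
 * xsk_reuseq_swap().
 */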
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
	kvfree(rq);
}
EXPORT_SYMBOL_GPL(xsk_reuseq_free);

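/* Drop the reuse queue when the umem goes away. Any addresses still parked
 * in it are simply discarded along with the umem they point into.
 */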
void xsk_reuseq_destroy(struct xdp_umem *umem)
{
	xsk_reuseq_free(umem->fq_reuse);
	umem->fq_reuse = NULL;
}