/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct skb_array' data structure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	Limited-size FIFO of skbs. Can be used more or less whenever
 *	sk_buff_head can be used, except you need to know the queue size in
 *	advance.
 *	Implemented as a type-safe wrapper around ptr_ring.
 */
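
/*
 * Example (an illustrative sketch, not part of this header): the typical
 * lifecycle of an skb_array sized at init time. skb_array_produce()
 * returns -ENOSPC when the ring is full, skb_array_consume() returns
 * NULL when it is empty, and skb_array_cleanup() frees any skbs still
 * queued. 'q' and 'skb' are hypothetical locals.
 *
 *	struct skb_array q;
 *	struct sk_buff *skb;
 *
 *	if (skb_array_init(&q, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	if (skb_array_produce(&q, skb))
 *		kfree_skb(skb);
 *
 *	skb = skb_array_consume(&q);
 *
 *	skb_array_cleanup(&q);
 */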

#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#ifdef __KERNEL__
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#endif

struct skb_array {
	struct ptr_ring ring;
};

/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}
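
/*
 * An illustrative sketch of the barrier the comment above asks for: a
 * single producer spinning until space opens up. cpu_relax() keeps the
 * compiler from hoisting the ring-state read out of the loop.
 *
 *	while (__skb_array_full(a))
 *		cpu_relax();
 *	skb_array_produce(a, skb);
 */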

static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}

/* Produce: queue an skb at the ring tail. Returns 0 on success or
 * -ENOSPC if the ring is full. The _irq/_bh/_any variants take the
 * producer lock with interrupts or bottom halves disabled, or with
 * irqsave, respectively.
 */
static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}

static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}

static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}

static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}

/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return __ptr_ring_empty(&a->ring);
}

static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
{
	return __ptr_ring_peek(&a->ring);
}
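
/*
 * An illustrative sketch of the lockless peek: inspect the head skb
 * before deciding to dequeue it with the consume helpers below. Safe
 * only when the caller is serialized against other consumers (a single
 * consumer, or an external lock); 'want_to_dequeue' is a hypothetical
 * predicate.
 *
 *	struct sk_buff *skb = __skb_array_peek(a);
 *
 *	if (skb && want_to_dequeue(skb))
 *		skb = __skb_array_consume(a);
 */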

static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}

static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}

static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}

static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}

/* Consume: dequeue the skb at the ring head, or return NULL if the
 * ring is empty. The batched variants fill 'array' with up to n skbs
 * and return the number actually consumed.
 */
static inline struct sk_buff *__skb_array_consume(struct skb_array *a)
{
	return __ptr_ring_consume(&a->ring);
}

static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}

static inline int skb_array_consume_batched(struct skb_array *a,
					    struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
}
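
/*
 * An illustrative sketch of batched consumption: drain up to a budget
 * of skbs in one call, amortizing the consumer lock across the batch.
 * BATCH is hypothetical, and any delivery step would do in place of
 * netif_receive_skb().
 *
 *	struct sk_buff *skbs[BATCH];
 *	int i, n;
 *
 *	n = skb_array_consume_batched(a, skbs, BATCH);
 *	for (i = 0; i < n; i++)
 *		netif_receive_skb(skbs[i]);
 */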

static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}

static inline int skb_array_consume_batched_irq(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}

static inline int skb_array_consume_batched_any(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}

static inline int skb_array_consume_batched_bh(struct skb_array *a,
					       struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
}

/* Length the skb would occupy on the wire: a VLAN tag held in skb
 * metadata adds VLAN_HLEN bytes that skb->len does not account for.
 */
static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
	if (likely(skb)) {
		int len = skb->len;

		if (skb_vlan_tag_present(skb))
			len += VLAN_HLEN;

		return len;
	} else {
		return 0;
	}
}

static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init(&a->ring, size, gfp);
}

/* Destructor callback handed to the ptr_ring helpers below: frees skbs
 * that cannot be returned to the ring or that remain queued at teardown.
 */
static void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}

static inline void skb_array_unconsume(struct skb_array *a,
				       struct sk_buff **skbs, int n)
{
	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
}
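
/*
 * An illustrative sketch of unconsume: hand back skbs taken by a
 * batched consume but not processed, e.g. when a budget ran out
 * mid-batch. Entries the ring cannot take back are freed through
 * __skb_array_destroy_skb(). 'process_some' and the surrounding
 * variables are hypothetical.
 *
 *	n = skb_array_consume_batched(a, skbs, BATCH);
 *	done = process_some(skbs, n);
 *	skb_array_unconsume(a, skbs + done, n - done);
 */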

static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}

static inline int skb_array_resize_multiple(struct skb_array **rings,
					    int nrings, unsigned int size,
					    gfp_t gfp)
{
	/* The cast below is only valid while 'ring' stays the first
	 * member of struct skb_array.
	 */
	BUILD_BUG_ON(offsetof(struct skb_array, ring));
	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
					nrings, size, gfp,
					__skb_array_destroy_skb);
}
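
/*
 * An illustrative sketch of resizing every queue of a multiqueue
 * device at once, e.g. after the user changes the queue length.
 * 'rings', 'nr_queues' and 'new_len' are hypothetical; on allocation
 * failure the existing rings are left as they were.
 *
 *	if (skb_array_resize_multiple(rings, nr_queues, new_len, GFP_KERNEL))
 *		return -ENOMEM;
 */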

static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}

#endif /* _LINUX_SKB_ARRAY_H */