// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/swiotlb.h>

#include "xsk_queue.h"

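/* Tear down the kernel virtual mapping of the umem pages set up by
 * xp_addr_map().
 */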
static void xp_addr_unmap(struct xsk_buff_pool *pool)
{
	vunmap(pool->addrs);
}

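/* Map all umem pages into one contiguous kernel virtual address range, so
 * that the buffer at umem offset addr can be reached as pool->addrs + addr.
 */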
static int xp_addr_map(struct xsk_buff_pool *pool,
		       struct page **pages, u32 nr_pages)
{
	pool->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!pool->addrs)
		return -ENOMEM;
	return 0;
}

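/* Free a pool and everything it owns. Safe to call with a NULL or partially
 * constructed pool, which is what the error path in xp_create() relies on.
 */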
void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	xp_addr_unmap(pool);
	kvfree(pool->heads);
	kvfree(pool);
}

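/* Allocate a buffer pool for a umem of nr_pages pages carved into chunks of
 * chunk_size bytes, and pre-populate the free_heads stack with one
 * xdp_buff_xsk per chunk. Returns NULL on failure.
 */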
struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
				u32 chunk_size, u32 headroom, u64 size,
				bool unaligned)
{
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	int err;
	u32 i;

	pool = kvzalloc(struct_size(pool, free_heads, chunks), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	pool->chunk_mask = ~((u64)chunk_size - 1);
	pool->addrs_cnt = size;
	pool->heads_cnt = chunks;
	pool->free_heads_cnt = chunks;
	pool->headroom = headroom;
	pool->chunk_size = chunk_size;
	pool->cheap_dma = true;
	pool->unaligned = unaligned;
	pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = chunk_size - headroom;
		pool->free_heads[i] = xskb;
	}

	err = xp_addr_map(pool, pages, nr_pages);
	if (!err)
		return pool;

out:
	xp_destroy(pool);
	return NULL;
}

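/* Attach the fill queue that __xp_alloc() will consume addresses from. */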
void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq)
{
	pool->fq = fq;
}

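/* Point every pre-allocated xdp_buff in the pool at the driver's rxq info. */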
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

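/* Undo xp_dma_map(): unmap every umem page from the device and release the
 * dma address array. A zero entry means the page was never mapped.
 */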
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	if (pool->dma_pages_cnt == 0)
		return;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		dma = &pool->dma_pages[i];
		if (*dma) {
			dma_unmap_page_attrs(pool->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

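/* Record, in a spare bit of each dma address (XSK_NEXT_PG_CONTIG_MASK),
 * whether the following page is physically contiguous with this one. Buffers
 * spanning such a boundary can then be permitted in unaligned mode.
 */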
static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
{
	u32 i;

	for (i = 0; i < pool->dma_pages_cnt - 1; i++) {
		if (pool->dma_pages[i] + PAGE_SIZE == pool->dma_pages[i + 1])
			pool->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			pool->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

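/* Return false if any umem page was bounced through SWIOTLB, since those
 * buffers always need real sync work.
 */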
static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
{
#if defined(CONFIG_SWIOTLB)
	phys_addr_t paddr;
	u32 i;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
		if (is_swiotlb_buffer(paddr))
			return false;
	}
#endif
	return true;
}

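/* DMA is "cheap" when every dma_sync_single_* call for this device is
 * guaranteed to be a no-op: no sync callbacks in the dma_map_ops, no SWIOTLB
 * bouncing, and a cache-coherent direct mapping. When this holds, xp_alloc()
 * skips the sync calls entirely. Note that a NULL ops means direct mapping,
 * so the dma_is_direct() check below only runs in the direct case.
 */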
static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
{
#if defined(CONFIG_HAS_DMA)
	const struct dma_map_ops *ops = get_dma_ops(pool->dev);

	if (ops) {
		return !ops->sync_single_for_cpu &&
			!ops->sync_single_for_device;
	}

	if (!dma_is_direct(ops))
		return false;

	if (!xp_check_swiotlb_dma(pool))
		return false;

	if (!dev_is_dma_coherent(pool->dev)) {
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
		return false;
#endif
	}
#endif
	return true;
}

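/* Map every umem page for device DMA and decide whether subsequent syncs can
 * be elided (pool->cheap_dma). On any mapping failure the pages mapped so
 * far are unwound with xp_dma_unmap().
 */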
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	dma_addr_t dma;
	u32 i;

	pool->dma_pages = kvcalloc(nr_pages, sizeof(*pool->dma_pages),
				   GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dev;
	pool->dma_pages_cnt = nr_pages;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			xp_dma_unmap(pool, attrs);
			return -ENOMEM;
		}
		pool->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(pool);

	pool->cheap_dma = xp_check_cheap_dma(pool);
	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

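/* A chunk is invalid in unaligned mode if it straddles two pages that are
 * not physically contiguous.
 */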
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

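/* Validate a fill queue address in unaligned mode: strip the offset bits and
 * make sure the chunk fits inside the umem without crossing a non-contiguous
 * page boundary.
 */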
static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

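/* Validate a fill queue address in aligned mode: round it down to the chunk
 * boundary and check that it lies inside the umem.
 */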
static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

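/* Slow path of xp_alloc(): take a buffer head off the free_heads stack and
 * bind it to the next valid address from the fill queue, skipping and
 * counting any invalid descriptors along the way. The contiguity flag is
 * masked off the dma page entry when computing the frame's dma address.
 */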
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	xskb = pool->free_heads[--pool->free_heads_cnt];

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			xp_release(xskb);
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}
	xskq_cons_release(pool->fq);

	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
	if (pool->dma_pages_cnt) {
		xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
				   ~XSK_NEXT_PG_CONTIG_MASK) +
				  (addr & ~PAGE_MASK);
		xskb->dma = xskb->frame_dma + pool->headroom +
			    XDP_PACKET_HEADROOM;
	}
	return xskb;
}

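/* Allocate one rx buffer: prefer the recycled free_list, fall back to the
 * fill queue via __xp_alloc(). The buffer is synced for the device unless
 * the pool's DMA setup makes that a no-op.
 */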
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (!pool->cheap_dma) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

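/* Cheap check for whether count buffers can be allocated: recycled buffers
 * first, then whatever the fill queue can provide.
 */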
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

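/* Return a buffer to the pool's free_list so xp_alloc() can recycle it. */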
void xp_free(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

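/* Translate a umem address straight to a kernel virtual address. */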
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

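/* Translate a umem address straight to a dma address, masking off the
 * contiguity flag stored in the dma page entry.
 */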
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

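/* Slow-path sync used when !pool->cheap_dma: make a frame just written by
 * the device visible to the CPU.
 */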
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len,
				      DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

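/* Slow-path sync used when !pool->cheap_dma: flush CPU writes to a frame
 * before handing it to the device.
 */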
void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);