/* SPDX-License-Identifier: GPL-2.0
 * XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>

#include "xdp_umem_props.h"

#define RX_BATCH_SIZE 16

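/* prod_head/prod_tail and cons_head/cons_tail are local (cached) copies of
 * the ring state; the shared ring->producer and ring->consumer fields are
 * only read or written when the cached view runs out or when new entries
 * are published. invalid_descs counts descriptors that referenced a frame
 * outside the registered umem.
 */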
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating for both RXTX and umem queues */

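/* Number of entries available to the consumer, capped at dcnt. The shared
 * producer pointer is only re-read when the cached view says the ring is
 * empty.
 */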
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

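/* Number of entries free for the producer. The shared consumer pointer is
 * only re-read when the cached view has fewer than dcnt free entries.
 */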
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

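/* A frame id is only valid if it indexes a frame inside the registered
 * umem; anything else is counted in invalid_descs.
 */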
static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
{
	if (unlikely(idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

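/* Scan from cons_tail towards cons_head and return a pointer to the first
 * valid id, skipping (and counting) invalid ones. Returns NULL if the
 * current batch holds no valid id.
 */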
static inline u32 *xskq_validate_id(struct xsk_queue *q)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_id(q, ring->desc[idx]))
			return &ring->desc[idx];

		q->cons_tail++;
	}

	return NULL;
}

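/* Peek at the next umem id without consuming it. When the cached batch is
 * exhausted, the consumer pointer is published and a new batch of up to
 * RX_BATCH_SIZE entries is fetched before validation.
 */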
static inline u32 *xskq_peek_id(struct xsk_queue *q)
{
	struct xdp_umem_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_id(q);
	}

	ring = (struct xdp_umem_ring *)q->ring;
	return &ring->desc[q->cons_tail & q->ring_mask];
}

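/* Consume the id returned by xskq_peek_id() and step past any invalid ids
 * that follow it.
 */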
static inline void xskq_discard_id(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_id(q);
}

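/* Illustrative consumer flow for a umem queue (a sketch only, not lifted
 * from the callers of this header):
 *
 *	u32 *id = xskq_peek_id(q);
 *
 *	if (id) {
 *		... use the frame identified by *id ...
 *		xskq_discard_id(q);
 *	}
 */
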
/* Rx queue */

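/* Post one Rx descriptor (frame id, length, offset) at the cached producer
 * head. It is not visible to the consumer until xskq_produce_flush_desc()
 * is called. Returns -ENOSPC if the ring is full.
 */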
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u32 id, u32 len, u16 offset)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].idx = id;
	ring->desc[idx].len = len;
	ring->desc[idx].offset = offset;

	return 0;
}

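/* Publish all descriptors queued with xskq_produce_batch_desc() by moving
 * the shared producer pointer, after a write barrier so the descriptor
 * data is visible before the new producer value.
 */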
static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

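/* True if the queue holds no unconsumed descriptors, i.e. everything
 * published at prod_tail has already been consumed.
 */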
static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return (xskq_nb_free(q, q->prod_tail, 1) == q->nentries);
}

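/* Illustrative producer flow for an Rx queue (a sketch only, not lifted
 * from the callers of this header):
 *
 *	if (xskq_produce_batch_desc(q, id, len, offset) == 0)
 *		xskq_produce_flush_desc(q);
 */
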
void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */