/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#ifndef __LIBBPF_XSK_H
#define __LIBBPF_XSK_H

#include <stdio.h>
#include <stdint.h>
#include <linux/if_xdp.h>

#include "libbpf.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
	__u32 cached_prod; \
	__u32 cached_cons; \
	__u32 mask; \
	__u32 size; \
	__u32 *producer; \
	__u32 *consumer; \
	void *ring; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);
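
/* Each ring is a single-producer/single-consumer circular buffer. The
 * producer and consumer members point at indices shared with the kernel
 * through the ring mmap, while cached_prod and cached_cons are
 * process-local copies that let the fast path avoid re-reading the shared
 * pointers on every access. Note that cached_cons of a producer ring is
 * kept r->size ahead of the real consumer pointer; see xsk_prod_nb_free()
 * below.
 */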

/* For a detailed explanation of the memory barriers associated with the
 * ring, please take a look at net/xdp/xsk_queue.h.
 */

struct xsk_umem;
struct xsk_socket;

static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
					      __u32 idx)
{
	__u64 *addrs = (__u64 *)fill->ring;

	return &addrs[idx & fill->mask];
}

static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
	const __u64 *addrs = (const __u64 *)comp->ring;

	return &addrs[idx & comp->mask];
}

static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
						      __u32 idx)
{
	struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

	return &descs[idx & tx->mask];
}

static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
	const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

	return &descs[idx & rx->mask];
}
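
/* The accessors above rely on the rings being power-of-two sized, so that
 * mask == size - 1 and the free-running 32-bit indices wrap naturally:
 * with size 2048, mask is 2047 and idx 4096 maps to slot 4096 & 2047 == 0.
 */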

static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
	__u32 free_entries = r->cached_cons - r->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer.
	 * cached_cons is r->size bigger than the real consumer pointer so
	 * that this addition can be avoided in the more frequently
	 * executed code that computes free_entries at the beginning of
	 * this function. Without this optimization it would have been
	 * free_entries = r->cached_cons - r->cached_prod + r->size.
	 */
	r->cached_cons = *r->consumer + r->size;

	return r->cached_cons - r->cached_prod;
}
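
/* Worked example of the offset above: for a ring of size 2048 where the
 * real consumer pointer is 300 and cached_prod is 2100, cached_cons
 * becomes 300 + 2048 = 2348, giving 2348 - 2100 = 248 free entries.
 * Computing cons - prod + size directly would yield the same result:
 * 300 - 2100 + 2048 = 248.
 */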

static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
	__u32 entries = r->cached_prod - r->cached_cons;

	if (entries == 0) {
		r->cached_prod = *r->producer;
		entries = r->cached_prod - r->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
					    size_t nb, __u32 *idx)
{
	if (unlikely(xsk_prod_nb_free(prod, nb) < nb))
		return 0;

	*idx = prod->cached_prod;
	prod->cached_prod += nb;

	return nb;
}

static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
{
	/* Make sure everything has been written to the ring before indicating
	 * this to the kernel by writing the producer pointer.
	 */
	smp_wmb();

	*prod->producer += nb;
}
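
/* A minimal sketch of the producer-side protocol for a fill ring; the
 * names fill and frame_addr are placeholders, not part of this API.
 * Reserve the slots, write the entries, then publish them all with a
 * single submit:
 *
 *	__u32 idx, i;
 *
 *	if (xsk_ring_prod__reserve(&fill, 64, &idx) == 64) {
 *		for (i = 0; i < 64; i++)
 *			*xsk_ring_prod__fill_addr(&fill, idx + i) =
 *				frame_addr[i];
 *		xsk_ring_prod__submit(&fill, 64);
 *	}
 */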

static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
					 size_t nb, __u32 *idx)
{
	size_t entries = xsk_cons_nb_avail(cons, nb);

	if (likely(entries > 0)) {
		/* Make sure we do not speculatively read the data before
		 * we have received the packet buffers from the ring.
		 */
		smp_rmb();

		*idx = cons->cached_cons;
		cons->cached_cons += entries;
	}

	return entries;
}

static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb)
{
	/* Make sure data has been read before indicating we are done
	 * with the entries by updating the consumer pointer.
	 */
	smp_mb();

	*cons->consumer += nb;
}

static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
	return &((char *)umem_area)[addr];
}
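
/* The corresponding consumer-side sketch for an rx ring; rx, umem_area
 * and process() are placeholders. Peek at completed descriptors, locate
 * the packet data in the umem, then release the ring entries:
 *
 *	__u32 idx, i;
 *	size_t rcvd = xsk_ring_cons__peek(&rx, 64, &idx);
 *
 *	for (i = 0; i < rcvd; i++) {
 *		const struct xdp_desc *desc =
 *			xsk_ring_cons__rx_desc(&rx, idx + i);
 *
 *		process(xsk_umem__get_data(umem_area, desc->addr),
 *			desc->len);
 *	}
 *	if (rcvd)
 *		xsk_ring_cons__release(&rx, rcvd);
 */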

LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS      2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS      2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT    11 /* 2048 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE     (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0

struct xsk_umem_config {
	__u32 fill_size;	/* number of descriptors in the fill ring */
	__u32 comp_size;	/* number of descriptors in the completion ring */
	__u32 frame_size;	/* size of each umem frame in bytes */
	__u32 frame_headroom;	/* headroom reserved at the start of each frame */
};

/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)

struct xsk_socket_config {
	__u32 rx_size;		/* number of descriptors in the rx ring */
	__u32 tx_size;		/* number of descriptors in the tx ring */
	__u32 libbpf_flags;	/* XSK_LIBBPF_FLAGS__* flags */
	__u32 xdp_flags;	/* XDP_FLAGS_* from linux/if_link.h */
	__u16 bind_flags;	/* XDP_* bind flags from linux/if_xdp.h */
};

/* Set config to NULL to get the default configuration. */
LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
				void *umem_area, __u64 size,
				struct xsk_ring_prod *fill,
				struct xsk_ring_cons *comp,
				const struct xsk_umem_config *config);
LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
				  const char *ifname, __u32 queue_id,
				  struct xsk_umem *umem,
				  struct xsk_ring_cons *rx,
				  struct xsk_ring_prod *tx,
				  const struct xsk_socket_config *config);

/* Returns 0 for success and -EBUSY if the umem is still in use. */
LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk);
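
/* A minimal end-to-end setup sketch, assuming a hypothetical NUM_FRAMES
 * constant and alloc_umem_buffer() helper for a page-aligned buffer;
 * error paths are elided. NULL configs select the defaults above, and
 * teardown must run in reverse order since xsk_umem__delete() returns
 * -EBUSY while a socket still uses the umem:
 *
 *	struct xsk_ring_prod fill, tx;
 *	struct xsk_ring_cons comp, rx;
 *	struct xsk_umem *umem;
 *	struct xsk_socket *xsk;
 *	__u64 size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *	void *bufs = alloc_umem_buffer(size);
 *
 *	if (xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL))
 *		return -1;
 *	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
 *		return -1;
 *	...
 *	xsk_socket__delete(xsk);
 *	xsk_umem__delete(umem);
 */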

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_XSK_H */