/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};
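
/* Example (editorial sketch, not part of this header's API): a zero-copy
 * driver checks whether the page after a given chunk is virtually
 * contiguous by testing the flag packed into the otherwise-zero low bits
 * of the page address:
 *
 *	u64 pg = addr >> PAGE_SHIFT;
 *	bool next_pg_contig = (unsigned long)umem->pages[pg].addr &
 *			      XSK_NEXT_PG_CONTIG_MASK;
 */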

struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

/* Flags for the umem flags field.
 *
 * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public
 * flags. See include/uapi/linux/if_xdp.h.
 */
#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	bool zc;
	spinlock_t xsk_tx_list_lock;
	struct list_head xsk_tx_list;
};

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */
struct xsk_map_node {
	struct list_head node;
	struct xsk_map *map;
	struct xdp_sock **map_entry;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_release_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
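
/* Sketch of the need_wakeup protocol as a driver might use it (assumed
 * typical usage; the exact trigger conditions are driver specific, and
 * budget is a hypothetical entry count). When the fill queue cannot supply
 * enough buffers, the driver requests a wakeup from user space and clears
 * the request once buffers are available again:
 *
 *	if (!xsk_umem_has_addrs(umem, budget))
 *		xsk_set_rx_need_wakeup(umem);
 *	else
 *		xsk_clear_rx_need_wakeup(umem);
 */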

void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}
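
/* Example (editorial sketch): the XDP_REDIRECT path resolves a map index
 * to a socket with this lookup; a hypothetical caller:
 *
 *	struct xdp_sock *xs = __xsk_map_lookup_elem(map, index);
 *
 *	if (!xs)
 *		return -EINVAL;
 *	return __xsk_map_redirect(xs, xdp);
 */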

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	unsigned long page_addr;

	addr = xsk_umem_add_offset_to_addr(addr);
	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;

	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	addr = xsk_umem_add_offset_to_addr(addr);

	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
}
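
/* Example (editorial sketch of zero-copy RX buffer setup; descriptor
 * programming and error handling are driver specific):
 *
 *	u64 addr;
 *	dma_addr_t dma;
 *
 *	if (!xsk_umem_peek_addr(umem, &addr))
 *		return false;		// fill queue empty
 *	dma = xdp_umem_get_dma(umem, addr);
 *	// ... write dma into the HW RX descriptor ...
 *	xsk_umem_release_addr(umem);
 */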

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return true;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_release_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
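
/* Example (editorial sketch): with the reuse queue, a driver consumes
 * fill addresses through the _rq variants, and recycles addresses it
 * could not use (e.g. on ring teardown) so they are handed out first on
 * the next peek:
 *
 *	u64 addr;
 *
 *	while (xsk_umem_peek_addr_rq(umem, &addr)) {
 *		// ... program a HW descriptor for addr ...
 *		xsk_umem_release_addr_rq(umem);
 *	}
 *
 *	// on teardown, for each address still held by the driver:
 *	xsk_umem_fq_reuse(umem, addr);
 */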

/* Handle the offset appropriately depending on aligned or unaligned mode.
 * For unaligned mode, we store the offset in the upper 16-bits of the address.
 * For aligned mode, we simply add the offset to the address.
 */
static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
					 u64 offset)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	else
		return address + offset;
}
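
/* Example (editorial sketch, assuming XDP_UMEM_UNALIGNED_CHUNK_FLAG is
 * set): the offset travels in the upper bits, and the extraction helpers
 * above undo this packing:
 *
 *	u64 packed = xsk_umem_adjust_offset(umem, 0x3000, 0x10);
 *
 *	// xsk_umem_extract_addr(packed)        == 0x3000
 *	// xsk_umem_extract_offset(packed)      == 0x10
 *	// xsk_umem_add_offset_to_addr(packed)  == 0x3010
 */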
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return false;
}

static inline void xsk_umem_release_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return 0;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return false;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
					 u64 offset)
{
	return 0;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}
#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */