/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

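/* Per-page bookkeeping for a UMEM page: the kernel virtual address of
 * the page and, when the UMEM is bound in zero-copy mode, its DMA
 * address for the bound device.
 */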
struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

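/* Stash of FILL ring addresses that a driver handed back without
 * consuming (for example across a ring reconfiguration), so the
 * buffers are not lost; "length" of the "nentries" slots are in use.
 */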
struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

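/* A UMEM is the user-space memory area that RX and TX buffers are
 * carved out of.  It owns the FILL (fq) and COMPLETION (cq) rings and
 * is shared by every AF_XDP socket bound to it (xsk_list).
 */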
struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

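/* An AF_XDP socket: one RX ring, one TX ring, and a reference to the
 * UMEM that its descriptors point into.
 */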
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
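/* Reuse-queue lifecycle: allocate with xsk_reuseq_prepare(), install
 * with xsk_reuseq_swap() (which returns the queue that should now be
 * freed), and release with xsk_reuseq_free().
 */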
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);

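/* A UMEM address is a byte offset into the UMEM area, so the page
 * index is addr >> PAGE_SHIFT and the low bits select the offset
 * within that page.
 */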
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

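/* Peeking does not consume: pair each successful *_peek_addr_rq() with
 * a *_discard_addr_rq() once the address has actually been used.
 */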
static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
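
/* Typical zero-copy RX allocation in a driver (a sketch; "buf" is a
 * hypothetical driver-local structure, and real drivers also offset
 * addr by the configured headroom):
 *
 *	u64 addr;
 *
 *	if (!xsk_umem_peek_addr_rq(umem, &addr))
 *		return false;
 *	buf->dma = xdp_umem_get_dma(umem, addr);
 *	buf->data = xdp_umem_get_data(umem, addr);
 *	xsk_umem_discard_addr_rq(umem);
 */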
#else
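/* CONFIG_XDP_SOCKETS=n: no-op stubs so that callers need no ifdefs. */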
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */