/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

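/* Per-buffer state for a zero-copy AF_XDP buffer: wraps the generic xdp_buff
 * and remembers the buffer's DMA addresses, its owning pool and its original
 * umem address.
 */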
struct xdp_buff_xsk {
        struct xdp_buff xdp;
        dma_addr_t dma;
        dma_addr_t frame_dma;
        struct xsk_buff_pool *pool;
        u64 orig_addr;
        struct list_head free_list_node;
};

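/* One DMA mapping of the umem pages for a given device, refcounted so that
 * pools bound to the same device and umem can share it.
 */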
struct xsk_dma_map {
        dma_addr_t *dma_pages;
        struct device *dev;
        struct net_device *netdev;
        refcount_t users;
        struct list_head list; /* Protected by the RTNL_LOCK */
        u32 dma_pages_cnt;
        bool dma_need_sync;
};

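/* The buffer pool itself. There is one pool per umem bound to a given netdev
 * and queue id; sockets that share that binding also share the pool.
 */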
struct xsk_buff_pool {
        /* Members only used in the control path come first. */
        struct device *dev;
        struct net_device *netdev;
        struct list_head xsk_tx_list;
        /* Protects modifications to the xsk_tx_list */
        spinlock_t xsk_tx_list_lock;
        refcount_t users;
        struct xdp_umem *umem;
        struct work_struct work;
        struct list_head free_list;
        u32 heads_cnt;
        u16 queue_id;

        /* Data path members, placed as close to free_heads at the end as possible. */
        struct xsk_queue *fq ____cacheline_aligned_in_smp;
        struct xsk_queue *cq;
        /* For performance reasons, each buff pool has its own array of dma_pages
         * even when they are identical.
         */
        dma_addr_t *dma_pages;
        struct xdp_buff_xsk *heads;
        u64 chunk_mask;
        u64 addrs_cnt;
        u32 free_list_cnt;
        u32 dma_pages_cnt;
        u32 free_heads_cnt;
        u32 headroom;
        u32 chunk_size;
        u32 chunk_shift;
        u32 frame_len;
        u8 cached_need_wakeup;
        bool uses_need_wakeup;
        bool dma_need_sync;
        bool unaligned;
        void *addrs;
        /* Mutual exclusion of the completion ring in SKB mode. Two cases to
         * protect: the NAPI TX thread and the sendmsg error paths in the SKB
         * destructor callback, and sockets sharing a single cq because they
         * use the same netdev and queue id.
         */
        spinlock_t cq_lock;
        struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
                  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
                         struct net_device *dev, u16 queue_id);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

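/* Set up the buffer's umem address and the kernel virtual address of its data
 * area, derived from the pool's addrs mapping plus the configured headroom.
 */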
static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
                                     u64 addr)
{
        xskb->orig_addr = addr;
        xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

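/* Derive the buffer's DMA addresses from the per-page DMA mapping: mask off
 * the contiguity flag stored in the low bits of the page entry, then add the
 * offset within the page. xskb->dma points at the start of the packet data.
 */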
static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
                                    dma_addr_t *dma_pages, u64 addr)
{
        xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
                          (addr & ~PAGE_MASK);
        xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}

/* AF_XDP ZC drivers, via xdp_sock_drv.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
        return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
        return xskb->frame_dma;
}

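/* No dma_need_sync check here: callers such as xsk_buff_dma_sync_for_cpu()
 * in xdp_sock_drv.h are expected to test pool->dma_need_sync before calling.
 */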
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
        xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
                                 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
                                          dma_addr_t dma, size_t size)
{
        if (!pool->dma_need_sync)
                return;

        xp_dma_sync_for_device_slow(pool, dma, size);
}

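/* Returns true if the descriptor crosses a page boundary into a page that is
 * not DMA-contiguous (zero-copy mode), or runs past the end of the umem
 * (skb mode, where dma_pages_cnt is zero).
 */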
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
                                                 u64 addr, u32 len)
{
        bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

        if (likely(!cross_pg))
                return false;

        if (pool->dma_pages_cnt) {
                return !(pool->dma_pages[addr >> PAGE_SHIFT] &
                         XSK_NEXT_PG_CONTIG_MASK);
        }

        /* skb path */
        return addr + len > pool->addrs_cnt;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
        return addr & pool->chunk_mask;
}

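/* In unaligned mode the descriptor addr carries the offset into the chunk in
 * its upper bits (above XSK_UNALIGNED_BUF_OFFSET_SHIFT) and the chunk base
 * address in the lower bits; the helpers below split and recombine the two.
 */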
static inline u64 xp_unaligned_extract_addr(u64 addr)
{
        return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
        return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
        return xp_unaligned_extract_addr(addr) +
               xp_unaligned_extract_offset(addr);
}

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
        return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

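/* Only the unaligned case needs the free_heads stack; in the aligned case the
 * buffer is looked up directly from the chunk index of the fill ring address.
 */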
static inline void xp_release(struct xdp_buff_xsk *xskb)
{
        if (xskb->pool->unaligned)
                xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

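/* Convert a buffer back to the ring descriptor address handed to user space:
 * the offset of the data from the start of the chunk is added to the address
 * directly in aligned mode, or placed in the upper bits in unaligned mode.
 */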
static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
        u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

        offset += xskb->pool->headroom;
        if (!xskb->pool->unaligned)
                return xskb->orig_addr + offset;
        return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

#endif /* XSK_BUFF_POOL_H_ */