/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;
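/* Per-buffer metadata wrapped around the generic xdp_buff. Roughly: dma is
 * the DMA address of the start of packet data, frame_dma the DMA address of
 * the start of the whole frame/chunk, orig_addr the untouched umem address
 * the buffer came from, and free_list_node links the buffer into the pool's
 * free_list when it is not in use.
 */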
struct xdp_buff_xsk {
	struct xdp_buff xdp;
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	bool unaligned;
	u64 orig_addr;
	struct list_head free_list_node;
};
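/* A reference-counted DMA mapping of the umem pages for one device, so that
 * buffer pools binding the same umem to the same netdev can share a single
 * mapping instead of mapping the pages once per pool.
 */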
struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
	bool dma_need_sync;
};
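/* The buffer pool ties a umem to a specific netdev and queue id, and owns the
 * fill and completion rings (fq/cq) as well as the xdp_buff_xsk heads handed
 * to the data path.
 */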
struct xsk_buff_pool {
	/* Members only used in the control path come first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 frame_len;
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	void *addrs;
	/* Mutual exclusion of the completion ring in the SKB mode. Two cases to
	 * protect: the NAPI TX thread and sendmsg error paths in the SKB destructor
	 * callback, and sockets sharing a single cq because the same netdev and
	 * queue id is shared.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* AF_XDP core. */
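/* Control path helpers for the AF_XDP core: roughly, a pool is created when a
 * socket binds a umem (xp_create_and_assign_umem()), attached to a
 * netdev/queue id with xp_assign_dev() or, for shared umems,
 * xp_assign_dev_shared(), reference counted via xp_get_pool()/xp_put_pool(),
 * and detached/destroyed with xp_clear_dev() and xp_destroy().
 */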
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
			 struct net_device *dev, u16 queue_id);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_release(struct xdp_buff_xsk *xskb);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
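/* Rough sketch of how a zero-copy driver uses this API (normally through the
 * xsk_buff_* wrappers): at queue setup it calls xp_set_rxq_info() and maps
 * the umem pages with xp_dma_map(); on the Rx path it grabs buffers with
 * xp_alloc(), posts xp_get_dma() addresses to the hardware, syncs a completed
 * buffer with xp_dma_sync_for_cpu() before running the XDP program, and
 * returns unused buffers with xp_free(); on the Tx path it translates
 * descriptor addresses with xp_raw_get_dma() and syncs them with
 * xp_dma_sync_for_device() before kicking the hardware.
 */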
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}
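/* On non-coherent systems the buffers may need explicit DMA syncing:
 * xp_dma_sync_for_cpu() before the CPU reads a frame the device has written,
 * and xp_dma_sync_for_device() before a buffer is handed (back) to the
 * device. The _slow() variants perform the actual dma_sync calls.
 */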
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
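/* When the umem is DMA-mapped, bit 0 of each dma_pages[] entry records
 * whether the next page is contiguous with this one in DMA space.
 * xp_desc_crosses_non_contig_pg() uses it to reject unaligned descriptors
 * that straddle a page boundary into a non-contiguous page; in the skb
 * (copy) path it only checks that the descriptor stays within the umem.
 */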
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	if (pool->dma_pages_cnt) {
		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
			 XSK_NEXT_PG_CONTIG_MASK);
	}

	/* skb path */
	return addr + len > pool->addrs_cnt;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}
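/* In unaligned chunk mode the 64-bit umem address carries an offset in its
 * upper bits and the base address in the lower bits, as laid out by the
 * XSK_UNALIGNED_BUF_* definitions in if_xdp.h. For example (assuming the
 * offset lives in the top 16 bits): base 0x3000 with offset 0x100 is encoded
 * as (0x100ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x3000, so
 * xp_unaligned_extract_addr() returns 0x3000, xp_unaligned_extract_offset()
 * returns 0x100, and xp_unaligned_add_offset_to_addr() returns 0x3100.
 */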
static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}

#endif /* XSK_BUFF_POOL_H_ */