blob: a4bc4749faaccf655657a861e42a7808d224bfbc [file] [log] [blame]
Björn Töpela36b38aa2019-01-24 19:59:39 +01001/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright(c) 2019 Intel Corporation. */
3
4#ifndef XSK_H_
5#define XSK_H_
6
/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
/* NOTE(review): presumably bit 0 marks that the next umem page is
 * contiguous with this one (the name suggests so); confirm at the sites
 * that set/test this flag in the xsk buff-pool code.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
13
/* Legacy (v1) layout of the per-ring mmap offsets reported to userspace.
 * NOTE(review): presumably kept for binary compatibility with older
 * userspace that expects this smaller struct (it appears to predate
 * fields later added to the uapi struct xdp_ring_offset); confirm
 * against include/uapi/linux/if_xdp.h. Layout is ABI — do not modify.
 */
struct xdp_ring_offset_v1 {
	__u64 producer;		/* offset of the producer index */
	__u64 consumer;		/* offset of the consumer index */
	__u64 desc;		/* offset of the descriptor array */
};
19
/* Legacy (v1) counterpart of the uapi struct xdp_mmap_offsets: one
 * xdp_ring_offset_v1 per ring. NOTE(review): fr/cr presumably stand for
 * the fill and completion rings per usual AF_XDP naming — verify against
 * the uapi header. Layout is ABI — do not modify.
 */
struct xdp_mmap_offsets_v1 {
	struct xdp_ring_offset_v1 rx;	/* RX ring */
	struct xdp_ring_offset_v1 tx;	/* TX ring */
	struct xdp_ring_offset_v1 fr;	/* fill ring (presumably) */
	struct xdp_ring_offset_v1 cr;	/* completion ring (presumably) */
};
26
/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket reside in.
 */

struct xsk_map_node {
	struct list_head node;		/* link in xdp_sock's map_list */
	struct xsk_map *map;		/* the map this socket resides in */
	/* Slot inside @map referring back to the socket; RCU-protected.
	 * Passed to xsk_map_try_sock_delete() to clear the entry.
	 */
	struct xdp_sock __rcu **map_entry;
};
36
/* Convert a generic struct sock pointer to its containing xdp_sock.
 * The direct cast is valid only if struct xdp_sock embeds struct sock
 * as its first member — NOTE(review): confirm in include/net/xdp_sock.h.
 */
static inline struct xdp_sock *xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;
}
41
/* Remove @xs from @map via the slot @map_entry. NOTE(review): the "try"
 * in the name presumably means it is a no-op if the entry no longer
 * points at @xs — verify against the definition in net/xdp/xskmap.c.
 */
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock __rcu **map_entry);
/* Unregister whatever buffer pool is bound to @queue_id on @dev. */
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id);
/* Register @pool for @queue_id on @dev. Returns 0 on success or a
 * negative errno — NOTE(review): exact error codes defined by the
 * implementation in net/xdp/xsk.c; confirm there.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id);
Björn Töpeld20a16762020-05-20 21:20:50 +020047
Björn Töpela36b38aa2019-01-24 19:59:39 +010048#endif /* XSK_H_ */