/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)
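
/*
 * Illustrative sketch only (not an API defined by this header), assuming the
 * page address stored in xdp_umem_page::addr is page aligned as the comment
 * above states: bit 0 can then mark "the next page is physically contiguous"
 * and must be masked off before the pointer is dereferenced. A hypothetical
 * helper would look like:
 *
 *	static inline bool xdp_umem_next_pg_contig(const struct xdp_umem_page *pg)
 *	{
 *		return (unsigned long)pg->addr & XSK_NEXT_PG_CONTIG_MASK;
 *	}
 */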

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

/* Flags for the umem flags field.
 *
 * The NEED_WAKEUP flag is bit 1 due to the reuse of the flags field for
 * public flags. See include/uapi/linux/if_xdp.h.
 */
#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	bool zc;
	spinlock_t xsk_tx_list_lock;
	struct list_head xsk_tx_list;
};

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
};
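
/*
 * For orientation: struct xdp_sock backs an AF_XDP socket created from
 * userspace. A rough, untested outline of the control-path calls that end up
 * filling in the fields above (error handling and fill/completion ring setup
 * omitted):
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));	  // -> xs->umem
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries)); // -> xs->rx
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries)); // -> xs->tx
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)); // -> dev, queue_id, state = XSK_BOUND
 */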

#ifdef CONFIG_XDP_SOCKETS

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);
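
/*
 * Conceptual sketch of how the two calls above pair up for XSKMAP redirect
 * targets (drivers reach them indirectly via xdp_do_redirect() and the
 * end-of-poll flush): the redirect runs per packet, the flush runs once per
 * NAPI poll to publish the queued frames and wake up any waiting sockets:
 *
 *	while (packets_left--) {
 *		...
 *		err = __xsk_map_redirect(xs, xdp);	// queue frame for this socket
 *		...
 *	}
 *	__xsk_map_flush();				// flush all sockets touched above
 */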

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}
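
/*
 * The READ_ONCE() above lets this lookup run concurrently with XSKMAP updates;
 * the XDP fast path performs it under rcu_read_lock(), so the returned pointer
 * is only safe to use within that RCU read-side critical section. A
 * hypothetical caller (assumed, not a function from this header):
 *
 *	struct xdp_sock *xs = __xsk_map_lookup_elem(map, index);
 *
 *	if (!xs)
 *		return -EINVAL;			// no socket bound at this index
 *	return __xsk_map_redirect(xs, xdp);
 */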

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
}
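
/*
 * In unaligned-chunk mode the 64-bit descriptor address is split in two,
 * assuming the uapi values XSK_UNALIGNED_BUF_OFFSET_SHIFT == 48 and
 * XSK_UNALIGNED_BUF_ADDR_MASK == (1ULL << 48) - 1: the low 48 bits carry the
 * buffer's base address and the upper 16 bits carry an offset into it.
 * Worked example:
 *
 *	addr = (5ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x3000;
 *	xsk_umem_extract_addr(addr)	  == 0x3000
 *	xsk_umem_extract_offset(addr)	  == 5
 *	xsk_umem_add_offset_to_addr(addr) == 0x3005
 */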

#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return 0;
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */