/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;

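/* A registered UMEM: the chunk of user memory that backs the frame
 * buffers and the fill (fq) / completion (cq) rings for one or more
 * AF_XDP sockets. pgs/npgs are the pinned pages backing the area,
 * xsk_tx_list is the list of sockets sharing this umem for TX
 * (protected by xsk_tx_list_lock), and work defers final destruction
 * to workqueue context.
 */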
struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xsk_buff_pool *pool;
	u64 size;
	u32 headroom;
	u32 chunk_size;
	struct user_struct *user;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	bool zc;
	spinlock_t xsk_tx_list_lock;
	struct list_head xsk_tx_list;
};

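/* Kernel-side representation of a BPF map of type BPF_MAP_TYPE_XSKMAP.
 * xsk_map[] is a flexible array with one slot per map entry; each slot
 * holds a pointer to the AF_XDP socket bound to it (or NULL). Updates
 * from the syscall path are serialized by lock, while the data path
 * reads slots locklessly with READ_ONCE().
 */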
struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

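/* An AF_XDP socket. The embedded struct sock must stay first so that
 * generic socket code can cast between the two. rx and tx are the
 * descriptor rings shared with user space, flush_node links the socket
 * into the per-CPU flush list consumed by __xsk_map_flush(), and state
 * tracks the bind lifecycle. tx is cacheline aligned to limit false
 * sharing with the fields above.
 */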
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
};

#ifdef CONFIG_XDP_SOCKETS

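/* Data-path entry points, only built when CONFIG_XDP_SOCKETS is set.
 * xsk_generic_rcv() copies a frame into a socket's RX ring on the
 * generic (SKB) path, __xsk_map_redirect() queues a frame to the
 * socket an XDP program redirected to, and __xsk_map_flush() kicks all
 * sockets on the per-CPU flush list, typically at the end of a NAPI
 * poll.
 *
 * For illustration only (not part of this header): an XDP program
 * usually selects the socket through an XSKMAP, e.g. with a
 * hypothetical map named xsks_map keyed by RX queue index:
 *
 *	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
 *
 * which eventually reaches __xsk_map_redirect() for the matching entry.
 */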
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

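/* Look up the socket bound to a map slot from the redirect fast path.
 * The key comes straight from the BPF program, hence the bounds check
 * against max_entries. Slots are updated concurrently from the syscall
 * path, so the pointer is loaded with READ_ONCE().
 */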
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

#else

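/* Stubs for CONFIG_XDP_SOCKETS=n builds so that callers do not have to
 * wrap every call site in ifdefs.
 */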
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */