/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;

struct xdp_umem {
	void *addrs;			/* vmap()ed kernel address of the umem area */
	u64 size;			/* total size of the area in bytes */
	u32 headroom;			/* user-reserved headroom per chunk */
	u32 chunk_size;			/* size of one chunk (frame buffer) */
	u32 chunks;			/* number of chunks in the area */
	u32 npgs;			/* number of pinned pages */
	struct user_struct *user;	/* for pinned-memory accounting */
	refcount_t users;		/* reference count on the umem */
	u8 flags;			/* XDP_UMEM_* flags from registration */
	bool zc;			/* zero-copy mode enabled */
	struct page **pgs;		/* pinned user pages backing the area */
	int id;				/* system-wide umem id */
	struct list_head xsk_dma_list;	/* DMA mappings created for this umem */
	struct work_struct work;	/* deferred release work */
};
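
/*
 * Illustrative only, not part of the original header: a minimal userspace
 * sketch of the XDP_UMEM_REG setsockopt that the fields above describe.
 * The uapi names (struct xdp_umem_reg, SOL_XDP, XDP_UMEM_REG) come from
 * <linux/if_xdp.h>; buf, NUM_FRAMES, FRAME_SIZE and xsk_fd are placeholder
 * names for this sketch.
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)buf,	// page-aligned area to pin
 *		.len = NUM_FRAMES * FRAME_SIZE,	// becomes xdp_umem::size
 *		.chunk_size = FRAME_SIZE,	// becomes xdp_umem::chunk_size
 *		.headroom = 0,			// becomes xdp_umem::headroom
 *	};
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 */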

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock __rcu *xsk_map[]; /* one RCU-protected socket slot per map entry */
};
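
/*
 * Illustrative only: the BPF program side of an xsk_map. A minimal sketch
 * using libbpf BTF map syntax; the map name and size are arbitrary. With
 * one socket bound per RX queue, bpf_redirect_map() steers each frame to
 * the socket at slot ctx->rx_queue_index, and the last argument (XDP_PASS)
 * is the fallback action when that slot is empty.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_XSKMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} xsks_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xsk_redirect(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
 *	}
 */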

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;		/* device this socket is bound to */
	struct xdp_umem *umem;		/* umem backing this socket's rings */
	struct list_head flush_node;	/* member of the per-cpu flush list */
	struct xsk_buff_pool *pool;	/* buffer pool for the bound queue */
	u16 queue_id;			/* device queue this socket is bound to */
	bool zc;			/* zero-copy mode enabled */
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;

	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;	/* member of the pool's list of tx sockets */
	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
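
/*
 * Illustrative only: the userspace bind() that takes an xdp_sock from
 * XSK_READY to XSK_BOUND. A sketch assuming ifindex and queue_id are
 * supplied by the caller; sxdp_flags selects copy (XDP_COPY) or zero-copy
 * (XDP_ZEROCOPY) mode, which is reflected in the zc field above.
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue_id,
 *		.sxdp_flags = XDP_ZEROCOPY,
 *	};
 *	bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */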

#ifdef CONFIG_XDP_SOCKETS

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);
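
/*
 * Condensed sketch of how these hooks pair up, for orientation only:
 * __xsk_map_redirect() is reached via xdp_do_redirect() when an XDP
 * program returns XDP_REDIRECT into an xsk_map, and __xsk_map_flush()
 * is reached via xdp_do_flush() at the end of the driver's NAPI poll.
 * xsk_generic_rcv() is the equivalent entry point for the generic
 * (copy-mode) path. netdev, prog, xdp and act are placeholder names.
 *
 *	// per received frame in the driver's napi poll:
 *	act = bpf_prog_run_xdp(prog, &xdp);
 *	if (act == XDP_REDIRECT)
 *		xdp_do_redirect(netdev, &xdp, prog);	// -> __xsk_map_redirect()
 *
 *	// once, at the end of the poll:
 *	xdp_do_flush();					// -> __xsk_map_flush()
 */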

#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */