// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "xsk.h"

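/* Each socket stored in the map is tracked by an xsk_map_node on the
 * socket's xs->map_list. The node pins the map (bpf_map_inc() below), so
 * a map with live entries cannot be freed, and it lets the socket clear
 * any slots still pointing at it when the socket itself goes away.
 */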
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock __rcu **map_entry)
{
	struct xsk_map_node *node;

	node = bpf_map_kzalloc(&map->map, sizeof(*node),
			       GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	bpf_map_inc(&map->map);

	node->map = map;
	node->map_entry = map_entry;
	return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
	bpf_map_put(&node->map->map);
	kfree(node);
}

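/* Add/remove a node on the socket's list of map entries that reference
 * it. map_list_lock serializes these updates against the socket teardown
 * path walking the same list.
 */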
static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock __rcu **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}

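/* An XSKMAP is an array of AF_XDP sockets: both the key (conventionally
 * the RX queue index) and the value (a socket fd on update) are 4 bytes,
 * and creating one requires CAP_NET_ADMIN.
 */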
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct xsk_map *m;
	int numa_node;
	u64 size;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	numa_node = bpf_map_attr_numa_node(attr);
	size = struct_size(m, xsk_map, attr->max_entries);

	m = bpf_map_area_alloc(size, numa_node);
	if (!m)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&m->map, attr);
	spin_lock_init(&m->lock);

	return &m->map;
}

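/* Freeing can be this simple because every inserted socket pins the map
 * via its xsk_map_node (see xsk_map_node_alloc()), so by the time the
 * refcount drops to zero all entries have already been removed.
 * synchronize_net() waits out any RCU readers still dereferencing old
 * entries before the array is freed.
 */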
static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	synchronize_net();
	bpf_map_area_free(m);
}

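/* Standard bpf iteration contract: an out-of-range (or absent) key
 * restarts iteration at index 0, and the last slot returns -ENOENT.
 */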
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

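/* Inline the lookup into the calling BPF program: bounds-check the index
 * against max_entries, scale it by the entry (pointer) size, and load the
 * socket pointer straight out of the xsk_map[] array, avoiding a helper
 * call per packet.
 */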
static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
	*insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	if (key >= map->max_entries)
		return NULL;

	return rcu_dereference_check(m->xsk_map[key], rcu_read_lock_bh_held());
}

static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}

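/* Lookup from the syscall path is not supported: the stored values are
 * kernel socket pointers, and there is no meaningful 4-byte value (the
 * inserted fd is process-local) to hand back to user space.
 */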
static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

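/* Update takes an AF_XDP socket fd as the value and publishes the socket
 * in slot @key under the map lock, honoring BPF_NOEXIST/BPF_EXIST. A
 * rough user-space sketch (map_fd, queue_id and xsk_fd are illustrative
 * names, not from this file):
 *
 *	__u32 queue_id = 0;
 *	int xsk_fd = socket(AF_XDP, SOCK_RAW, 0);
 *	... bind xsk_fd to a device and queue ...
 *	bpf_map_update_elem(map_fd, &queue_id, &xsk_fd, 0);
 */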
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock __rcu **map_entry;
	struct xdp_sock *xs, *old_xs;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = rcu_dereference_protected(*map_entry, lockdep_is_held(&m->lock));
	if (old_xs == xs) {
		err = 0;
		goto out;
	} else if (old_xs && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out;
	} else if (!old_xs && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	rcu_assign_pointer(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}

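/* Clear slot @key: the entry is swapped out atomically under the map
 * lock, and the node tracking it is unlinked from the old socket's
 * map_list.
 */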
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock __rcu **map_entry;
	struct xdp_sock *old_xs;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = unrcu_pointer(xchg(map_entry, NULL));
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}

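/* Invoked when a BPF program calls bpf_redirect_map() on an XSKMAP; the
 * generic helper stashes the target socket for xdp_do_redirect(). A
 * minimal XDP-side sketch (map and program names are illustrative):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_XSKMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} xsks_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_sock_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index,
 *					XDP_PASS);
 *	}
 */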
static int xsk_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
				      __xsk_map_lookup_elem);
}

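/* Called from the socket-release path, driven by the socket's map_list.
 * The slot may already have been repointed by a concurrent update, hence
 * the check that it still refers to @xs before clearing it.
 */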
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock __rcu **map_entry)
{
	spin_lock_bh(&map->lock);
	if (rcu_access_pointer(*map_entry) == xs) {
		rcu_assign_pointer(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

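/* For use as an inner map (map-in-map), additionally require matching
 * max_entries: xsk_map_gen_lookup() bakes max_entries into the inlined
 * bounds check, so inner maps of different sizes are not interchangeable.
 */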
static bool xsk_map_meta_equal(const struct bpf_map *meta0,
			       const struct bpf_map *meta1)
{
	return meta0->max_entries == meta1->max_entries &&
	       bpf_map_meta_equal(meta0, meta1);
}

static int xsk_map_btf_id;
const struct bpf_map_ops xsk_map_ops = {
	.map_meta_equal = xsk_map_meta_equal,
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_gen_lookup = xsk_map_gen_lookup,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "xsk_map",
	.map_btf_id = &xsk_map_btf_id,
	.map_redirect = xsk_map_redirect,
};