// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "xsk.h"

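/* Reference counting for the underlying bpf_map: a map node (below)
 * holds a reference for as long as a socket occupies one of the map's
 * slots, keeping the map alive until the last entry is gone.
 */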
int xsk_map_inc(struct xsk_map *map)
{
	bpf_map_inc(&map->map);
	return 0;
}

void xsk_map_put(struct xsk_map *map)
{
	bpf_map_put(&map->map);
}

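/* A node ties an XSK socket to one slot of this map, so the socket
 * side can locate and clear its entries on teardown. The node pins
 * the map via xsk_map_inc() until xsk_map_node_free(). The atomic,
 * no-warn allocation presumably accommodates callers that cannot
 * sleep or must stay quiet on allocation failure.
 */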
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock **map_entry)
{
	struct xsk_map_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	err = xsk_map_inc(map);
	if (err) {
		kfree(node);
		return ERR_PTR(err);
	}

	node->map = map;
	node->map_entry = map_entry;
	return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
	xsk_map_put(node->map);
	kfree(node);
}

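/* Every socket tracks its map memberships on xs->map_list, protected
 * by xs->map_list_lock. xsk_map_sock_delete() drops all nodes that
 * reference the given map slot.
 */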
static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}

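/* Map creation requires CAP_NET_ADMIN; keys and values are fixed at
 * four bytes (slot index and XSK socket fd, respectively). The flat
 * xsk_map[] pointer array is tail-allocated in a single allocation,
 * charged against the memlock limit before the memory is committed.
 */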
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct bpf_map_memory mem;
	int err, numa_node;
	struct xsk_map *m;
	u64 size;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	numa_node = bpf_map_attr_numa_node(attr);
	size = struct_size(m, xsk_map, attr->max_entries);

	err = bpf_map_charge_init(&mem, size);
	if (err < 0)
		return ERR_PTR(err);

	m = bpf_map_area_alloc(size, numa_node);
	if (!m) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&m->map, attr);
	bpf_map_charge_move(&m->map.memory, &mem);
	spin_lock_init(&m->lock);

	return &m->map;
}

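/* bpf_clear_redirect_map() drops any per-CPU redirect state that
 * still points at this map, and synchronize_net() waits out in-flight
 * XDP programs before the backing memory is released.
 */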
static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	bpf_clear_redirect_map(map);
	synchronize_net();
	bpf_map_area_free(m);
}

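/* BPF_MAP_GET_NEXT_KEY semantics: an out-of-range (or missing) key
 * restarts iteration at index 0, the last index yields -ENOENT, and
 * any other key simply advances to index + 1.
 */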
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

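/* Inlined lookup emitted by the verifier in place of a
 * bpf_map_lookup_elem() call: bounds-check the key, scale it by
 * pointer size, index the tail-allocated xsk_map[] array and load the
 * stored xdp_sock pointer, or produce NULL for an out-of-range key.
 * Note that "struct xsk_sock" is nowhere defined; only the size of a
 * pointer matters here, so sizeof(struct xsk_sock *) works regardless.
 */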
static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
	*insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

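/* Program-side lookup, called from XDP context under RCU. */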
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}

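/* Slots hold kernel xdp_sock pointers, for which there is no
 * meaningful user-space value, so syscall-side lookup is rejected.
 */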
static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

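/* Syscall-side update: resolve the fd in @value to an XSK socket and
 * publish it in slot @key under m->lock, honouring BPF_NOEXIST and
 * BPF_EXIST against the current occupant. On success the new socket's
 * map_list gains a node and any displaced socket loses its own.
 */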
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs, *old_xs, **map_entry;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = READ_ONCE(*map_entry);
	if (old_xs == xs) {
		err = 0;
		goto out;
	} else if (old_xs && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out;
	} else if (!old_xs && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	WRITE_ONCE(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}

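/* Syscall-side delete: atomically clear slot @key and detach the old
 * socket's tracking node, if the slot was occupied.
 */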
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *old_xs, **map_entry;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = xchg(map_entry, NULL);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}

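/* Used by the socket teardown path (via the nodes on xs->map_list) to
 * clear @map_entry, but only if it still points at @xs; the slot may
 * have been concurrently replaced or deleted.
 */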
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry)
{
	spin_lock_bh(&map->lock);
	if (READ_ONCE(*map_entry) == xs) {
		WRITE_ONCE(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

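/* For map-in-map use, an inner XSKMAP is only interchangeable with
 * another one of the same size, on top of the generic meta checks.
 */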
static bool xsk_map_meta_equal(const struct bpf_map *meta0,
			       const struct bpf_map *meta1)
{
	return meta0->max_entries == meta1->max_entries &&
	       bpf_map_meta_equal(meta0, meta1);
}

static int xsk_map_btf_id;
const struct bpf_map_ops xsk_map_ops = {
	.map_meta_equal = xsk_map_meta_equal,
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_gen_lookup = xsk_map_gen_lookup,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "xsk_map",
	.map_btf_id = &xsk_map_btf_id,
};