// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* The devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the netdev_map
 * array (read-only) from an RCU critical section, we use call_rcu() to wait for
 * an rcu grace period before freeing the old data structures. This ensures the
 * datapath always has a valid copy. However, the datapath does a "flush"
 * operation that pushes any pending packets in the driver outside the RCU
 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 * this list is empty, indicating outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search for net devices in the map structure that
 * contain a reference to the net device and remove them. This is a two-step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check to see if the ifindex is the same as the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 * densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion are different.
 */
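
/* Illustrative BPF-side sketch (not part of the original file): a minimal XDP
 * program that forwards every packet through a DEVMAP via bpf_redirect_map().
 * The map name, program name and SEC() annotations are hypothetical and assume
 * the libbpf bpf_helpers.h conventions:
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *		__uint(max_entries, 64);
 *	} tx_port SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_devmap(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 *
 * With BPF_MAP_TYPE_DEVMAP_HASH only the map type changes; the key passed to
 * bpf_redirect_map() is then hashed (typically the egress ifindex) instead of
 * being used as an array index.
 */
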
47#include <linux/bpf.h>
Jesper Dangaard Brouer67f29e02018-05-24 16:45:46 +020048#include <net/xdp.h>
John Fastabend546ac1f2017-07-17 09:28:56 -070049#include <linux/filter.h>
Jesper Dangaard Brouer67f29e02018-05-24 16:45:46 +020050#include <trace/events/xdp.h>
John Fastabend546ac1f2017-07-17 09:28:56 -070051
Chenbo Feng6e71b042017-10-18 13:00:22 -070052#define DEV_CREATE_FLAG_MASK \
53 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
54
Toke Høiland-Jørgensen75ccae62020-01-16 16:14:44 +010055struct xdp_dev_bulk_queue {
Jesper Dangaard Brouer5d053f92018-05-24 16:45:51 +020056 struct xdp_frame *q[DEV_MAP_BULK_SIZE];
Toke Høiland-Jørgensend5df2832019-06-28 11:12:34 +020057 struct list_head flush_node;
Toke Høiland-Jørgensen75ccae62020-01-16 16:14:44 +010058 struct net_device *dev;
Jesper Dangaard Brouer38edddb2018-05-24 16:45:57 +020059 struct net_device *dev_rx;
Jesper Dangaard Brouer5d053f92018-05-24 16:45:51 +020060 unsigned int count;
61};
62
John Fastabend546ac1f2017-07-17 09:28:56 -070063struct bpf_dtab_netdev {
Jesper Dangaard Brouer67f29e02018-05-24 16:45:46 +020064 struct net_device *dev; /* must be first member, due to tracepoint */
Toke Høiland-Jørgensen6f9d4512019-07-26 18:06:55 +020065 struct hlist_node index_hlist;
John Fastabend546ac1f2017-07-17 09:28:56 -070066 struct bpf_dtab *dtab;
David Ahernfbee97f2020-05-29 16:07:13 -060067 struct bpf_prog *xdp_prog;
Daniel Borkmannaf4d0452017-08-23 01:47:54 +020068 struct rcu_head rcu;
Toke Høiland-Jørgensen75ccae62020-01-16 16:14:44 +010069 unsigned int idx;
David Ahern7f1c0422020-05-29 16:07:12 -060070 struct bpf_devmap_val val;
John Fastabend546ac1f2017-07-17 09:28:56 -070071};
72
73struct bpf_dtab {
74 struct bpf_map map;
Toke Høiland-Jørgensen071cdec2019-11-21 14:36:12 +010075 struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
John Fastabend2ddf71e2017-07-17 09:30:02 -070076 struct list_head list;
Toke Høiland-Jørgensen6f9d4512019-07-26 18:06:55 +020077
78 /* these are only used for DEVMAP_HASH type maps */
79 struct hlist_head *dev_index_head;
80 spinlock_t index_lock;
81 unsigned int items;
82 u32 n_buckets;
John Fastabend546ac1f2017-07-17 09:28:56 -070083};
84
Toke Høiland-Jørgensen1d233882020-01-16 16:14:45 +010085static DEFINE_PER_CPU(struct list_head, dev_flush_list);
John Fastabend4cc7b952017-08-04 22:02:19 -070086static DEFINE_SPINLOCK(dev_map_lock);
John Fastabend2ddf71e2017-07-17 09:30:02 -070087static LIST_HEAD(dev_map_list);
88
Toke Høiland-Jørgensen6f9d4512019-07-26 18:06:55 +020089static struct hlist_head *dev_map_create_hash(unsigned int entries)
90{
91 int i;
92 struct hlist_head *hash;
93
94 hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
95 if (hash != NULL)
96 for (i = 0; i < entries; i++)
97 INIT_HLIST_HEAD(&hash[i]);
98
99 return hash;
100}
101
Toke Høiland-Jørgensen071cdec2019-11-21 14:36:12 +0100102static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
103 int idx)
104{
105 return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
106}
107
Toke Høiland-Jørgensenfca16e52019-07-26 18:06:53 +0200108static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
John Fastabend546ac1f2017-07-17 09:28:56 -0700109{
David Ahernfbee97f2020-05-29 16:07:13 -0600110 u32 valsize = attr->value_size;
Björn Töpel96360002019-12-19 07:10:03 +0100111 u64 cost = 0;
112 int err;
John Fastabend546ac1f2017-07-17 09:28:56 -0700113
David Ahernfbee97f2020-05-29 16:07:13 -0600114 /* check sanity of attributes. 2 value sizes supported:
115 * 4 bytes: ifindex
116 * 8 bytes: ifindex + prog fd
117 */
John Fastabend546ac1f2017-07-17 09:28:56 -0700118 if (attr->max_entries == 0 || attr->key_size != 4 ||
David Ahernfbee97f2020-05-29 16:07:13 -0600119 (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
120 valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
121 attr->map_flags & ~DEV_CREATE_FLAG_MASK)
Toke Høiland-Jørgensenfca16e52019-07-26 18:06:53 +0200122 return -EINVAL;
John Fastabend546ac1f2017-07-17 09:28:56 -0700123
Toke Høiland-Jørgensen0cdbb4b2019-06-28 11:12:35 +0200124 /* Lookup returns a pointer straight to dev->ifindex, so make sure the
125 * verifier prevents writes from the BPF side
126 */
127 attr->map_flags |= BPF_F_RDONLY_PROG;
128
John Fastabend546ac1f2017-07-17 09:28:56 -0700129
Jakub Kicinskibd475642018-01-11 20:29:06 -0800130 bpf_map_init_from_attr(&dtab->map, attr);
John Fastabend546ac1f2017-07-17 09:28:56 -0700131
Toke Høiland-Jørgensen6f9d4512019-07-26 18:06:55 +0200132 if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
133 dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
134
135 if (!dtab->n_buckets) /* Overflow check */
136 return -EINVAL;
Toke Høiland-Jørgensen05679ca2019-10-17 12:57:02 +0200137 cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
Toke Høiland-Jørgensen071cdec2019-11-21 14:36:12 +0100138 } else {
139 cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
Toke Høiland-Jørgensen6f9d4512019-07-26 18:06:55 +0200140 }
141
Roman Gushchinb936ca62019-05-29 18:03:58 -0700142 /* if map size is larger than memlock limit, reject it */
Roman Gushchinc85d6912019-05-29 18:03:59 -0700143 err = bpf_map_charge_init(&dtab->map.memory, cost);
John Fastabend546ac1f2017-07-17 09:28:56 -0700144 if (err)
Toke Høiland-Jørgensenfca16e52019-07-26 18:06:53 +0200145 return -EINVAL;
Tobias Klauser582db7e2017-09-18 15:03:46 +0200146
Toke Høiland-Jørgensen6f9d4512019-07-26 18:06:55 +0200147 if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
148 dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
149 if (!dtab->dev_index_head)
Björn Töpel96360002019-12-19 07:10:03 +0100150 goto free_charge;
Toke Høiland-Jørgensen6f9d4512019-07-26 18:06:55 +0200151
152 spin_lock_init(&dtab->index_lock);
Toke Høiland-Jørgensen071cdec2019-11-21 14:36:12 +0100153 } else {
154 dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
155 sizeof(struct bpf_dtab_netdev *),
156 dtab->map.numa_node);
157 if (!dtab->netdev_map)
Björn Töpel96360002019-12-19 07:10:03 +0100158 goto free_charge;
Toke Høiland-Jørgensen6f9d4512019-07-26 18:06:55 +0200159 }
160
Toke Høiland-Jørgensenfca16e52019-07-26 18:06:53 +0200161 return 0;
Toke Høiland-Jørgensend5df2832019-06-28 11:12:34 +0200162
Roman Gushchinb936ca62019-05-29 18:03:58 -0700163free_charge:
164 bpf_map_charge_finish(&dtab->map.memory);
Toke Høiland-Jørgensenfca16e52019-07-26 18:06:53 +0200165 return -ENOMEM;
166}
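
/* Illustrative user space sketch (not part of the original file): updating a
 * devmap slot with the 8-byte value layout validated in dev_map_init_map()
 * (ifindex plus an optional fd of an XDP program with attach type
 * BPF_XDP_DEVMAP). map_fd, ifindex and prog_fd are hypothetical placeholders;
 * bpf_map_update_elem() is the libbpf syscall wrapper:
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = ifindex,
 *		.bpf_prog.fd = prog_fd,
 *	};
 *	__u32 key = 0;
 *
 *	if (bpf_map_update_elem(map_fd, &key, &val, BPF_ANY))
 *		perror("bpf_map_update_elem");
 *
 * Leaving bpf_prog.fd at zero (or negative) installs the entry without a
 * per-entry program, matching the fd > 0 check in __dev_map_alloc_node().
 */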

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) were
	 * disconnected from events. The following synchronize_rcu() guarantees
	 * that both rcu read critical sections have completed and waits for
	 * preempt-disable regions (NAPI being the relevant context here) so we
	 * are certain there will be no further reads against the netdev_map and
	 * all flush operations are complete. Flush operations can only be done
	 * from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		kfree(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

bool dev_map_can_have_prog(struct bpf_map *map)
{
	if ((map->map_type == BPF_MAP_TYPE_DEVMAP ||
	     map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) &&
	    map->value_size != offsetofend(struct bpf_devmap_val, ifindex))
		return true;

	return false;
}

static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}
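
/* Illustrative driver-side sketch (not part of the original file): the pattern
 * the comment above relies on. Function and variable names are hypothetical;
 * the point is that xdp_do_flush() runs before napi->poll() returns, which
 * ends up in __dev_flush() and drains this CPU's dev_flush_list:
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		bool redirected = false;
 *		int work_done;
 *
 *		work_done = mydrv_clean_rx_ring(napi, budget, &redirected);
 *		if (redirected)
 *			xdp_do_flush();
 *
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;
 *	}
 */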

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here, a dev_put won't happen until after reading
 * the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}
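
/* Illustrative BPF-side sketch (not part of the original file): a program
 * reading a devmap slot; such lookups reach __dev_map_lookup_elem() via
 * dev_map_lookup_elem() below. The map name is hypothetical; because
 * dev_map_init_map() forces BPF_F_RDONLY_PROG, the returned value may only be
 * read from the program:
 *
 *	__u32 key = 0;
 *	struct bpf_devmap_val *entry;
 *
 *	entry = bpf_map_lookup_elem(&tx_port, &key);
 *	if (entry)
 *		bpf_printk("slot 0 -> ifindex %d", entry->ifindex);
 */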

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* The ingress dev_rx will be the same for all xdp_frames in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI function.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dev, xdpf, dev_rx);
}

static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
					 struct xdp_buff *xdp,
					 struct bpf_prog *xdp_prog)
{
	struct xdp_txq_info txq = { .dev = dev };
	u32 act;

	xdp_set_data_meta_invalid(xdp);
	xdp->txq = &txq;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return xdp;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		break;
	}

	xdp_return_buff(xdp);
	return NULL;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	if (dst->xdp_prog) {
		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
		if (!xdp)
			return 0;
	}
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, as well as any flush operations, because call_rcu()
	 * will wait for the preempt-disable region to complete (NAPI in this
	 * context). Additionally, the driver tear down ensures all soft irqs
	 * are complete before removing the net device once dev_put() drops
	 * the reference count to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
			   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP)
			goto err_put_prog;
	}

	dev->idx = idx;
	dev->dtab = dtab;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* can not specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq =
			__alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
					   sizeof(void *), GFP_ATOMIC);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);