/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* The devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an rcu grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using an atomic per-cpu bitmap. The bpf_dtab_netdev object will not be
 * destroyed until all bits are cleared, indicating that all outstanding flush
 * operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed and drop them. This is a two step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check whether its ifindex is the same as that of the net_device being
 * removed. When removing the dev, a cmpxchg() is used to ensure the correct
 * dev is removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references can not be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 */
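
/* For reference, a minimal sketch of how a devmap is typically consumed from
 * the BPF program side. The names and sizes below are illustrative only,
 * loosely following the samples/bpf XDP redirect examples; they are not part
 * of this file:
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(int),
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp_redirect_map")
 *	int xdp_redirect_map_prog(struct xdp_md *ctx)
 *	{
 *		// Redirect every packet out the device stored at index 0.
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}
 *
 * User space populates index 0 with an ifindex via bpf_map_update_elem(), and
 * the driver's flush hook pushes any bulked frames out after the NAPI poll.
 */
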
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct net_device *dev_rx;
	unsigned int count;
};

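/* One entry in the devmap: the target net_device (with a reference held via
 * dev_get_by_index()), a pointer back to the owning map, the entry's bit in
 * the per-cpu flush bitmap, and the per-cpu bulk queue used for ndo_xdp_xmit().
 */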
struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}

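/* Allocate the dtab, its netdev_map array and the per-cpu flush bitmaps, and
 * link the new map onto dev_map_list so the netdev unregister notifier can
 * find it later.
 */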
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_dtab;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_dtab;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete. The rcu critical section
	 * only guarantees no further reads against netdev_map. It does
	 * __not__ ensure pending flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush_needed bits to read zero on _all_ cpus. Because the above
	 * synchronize_rcu() ensures the map is disconnected from the program
	 * we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}

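/* Syscall-side iteration helper: a NULL or out-of-range key restarts the walk
 * at index 0, the last valid index returns -ENOENT, otherwise the next index
 * is handed back.
 */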
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

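/* Called from the XDP redirect path to mark, on this CPU, that the entry at
 * @bit has frames pending and must be covered by the next __dev_map_flush().
 */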
void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}

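/* Drain the per-CPU bulk queue for @obj through ndo_xdp_xmit(). If the driver
 * call fails outright, all queued frames are returned to the allocator here
 * and counted as drops. The outcome is reported via the xdp_devmap_xmit
 * tracepoint.
 */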
static int bq_xmit_all(struct bpf_dtab_netdev *obj,
		       struct xdp_bulk_queue *bq)
{
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the ctx bitmap
 * is zeroed before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct xdp_bulk_queue *bq;
		struct net_device *netdev;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		__clear_bit(bit, bitmap);

		bq = this_cpu_ptr(dev->bulkq);
		bq_xmit_all(dev, bq);
		netdev = dev->dev;
		if (likely(netdev->netdev_ops->ndo_xdp_flush))
			netdev->netdev_ops->ndo_xdp_flush(netdev);
	}
}

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until after
 * reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(obj, bq);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI func.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;
	return 0;
}

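/* Called from the XDP redirect path: convert the xdp_buff into an xdp_frame
 * and place it on the destination device's per-CPU bulk queue. Devices whose
 * driver lacks ndo_xdp_xmit are rejected with -EOPNOTSUPP.
 */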
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}

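/* Syscall-side lookup: the map value exposed to user space is the ifindex of
 * the netdev stored at @key, or no value if the slot is empty.
 */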
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

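/* Flush any state still referencing an entry that is about to be freed: clear
 * its flush_needed bit on every cpu, drain its bulk queues and kick the
 * driver's ndo_xdp_flush().
 */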
static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_flush) {
		struct net_device *fl = dev->dev;
		struct xdp_bulk_queue *bq;
		unsigned long *bitmap;
		int cpu;

		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(dev, bq);

			fl->netdev_ops->ndo_xdp_flush(dev->dev);
		}
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened yet,
	 * because driver side rcu_read_lock/unlock only protects the running
	 * XDP program. However, for pending flush operations the dev and ctx
	 * are stored in another per cpu map. Additionally, the driver tear
	 * down ensures all soft irqs are complete before removing the net
	 * device once its dev_put count drops to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

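/* Syscall-side update: the value is an ifindex. A zero ifindex clears the
 * slot; otherwise a new bpf_dtab_netdev is set up with its per-CPU bulk queue
 * and a reference on the net_device. BPF_NOEXIST can never succeed because
 * every index of an array-style map always exists.
 */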
static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
						sizeof(void *), gfp);
		if (!dev->bulkq) {
			kfree(dev);
			return -ENOMEM;
		}

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			free_percpu(dev->bulkq);
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
};

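/* Netdev notifier: on NETDEV_UNREGISTER, walk every devmap and remove any
 * entry that still references the departing net_device, using cmpxchg() so a
 * concurrent update or delete is not clobbered.
 */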
static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev ||
				    dev->dev->ifindex != netdev->ifindex)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);