/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details. A usage sketch follows
 * this comment.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using an atomic per-cpu bitmap. The
 * bpf_dtab_netdev object will not be destroyed until all bits are cleared,
 * indicating that all outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that
 * contain a reference to the net device and remove them. This is a two step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check whether the ifindex is the same as that of the net_device being
 * removed. When removing the dev, a cmpxchg() is used to ensure the correct
 * dev is removed; in the case of a concurrent update or delete operation it
 * is possible that the initially referenced dev is no longer in the map. As
 * the notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 */
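
/* For context, a minimal XDP program using a devmap this way might look
 * like the sketch below. This is an illustration only, not part of this
 * file: it assumes the usual bpf_helpers.h definitions, and the map and
 * program names are made up.
 *
 *	struct bpf_map_def SEC("maps") tx_ports = {
 *		.type        = BPF_MAP_TYPE_DEVMAP,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u32),
 *		.max_entries = 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;	// slot whose value is the egress ifindex
 *
 *		return bpf_redirect_map(&tx_ports, key, 0);
 *	}
 */
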
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}
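
/* A worked example of the sizing above, assuming 64-bit longs: with
 * attr->max_entries == 1024, BITS_TO_LONGS(1024) == 16, so each possible
 * CPU carries 16 * sizeof(unsigned long) == 128 bytes of flush bitmap.
 */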

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_dtab;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_dtab;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further reads against netdev_map. It does __not__ ensure pending
	 * flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush bitmap to show all flush_needed bits as zero on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program, we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}

static int bq_xmit_all(struct bpf_dtab_netdev *obj,
		       struct xdp_bulk_queue *bq)
{
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the ctx bitmap
 * is zeroed before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct xdp_bulk_queue *bq;
		struct net_device *netdev;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		__clear_bit(bit, bitmap);

		bq = this_cpu_ptr(dev->bulkq);
		bq_xmit_all(dev, bq);
		netdev = dev->dev;
		if (likely(netdev->netdev_ops->ndo_xdp_flush))
			netdev->netdev_ops->ndo_xdp_flush(netdev);
	}
}
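
/* The expected driver-side calling pattern is roughly the sketch below.
 * This is an illustration only; the driver helper name is made up. The
 * flush must run before napi->poll() returns:
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = example_clean_rx(napi, budget); // may run XDP
 *
 *		xdp_do_flush_map();	// drains devmap bulk queues via
 *					// __dev_map_flush()
 *		return done;
 *	}
 */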

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until
 * after reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}
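
/* A simplified sketch of how the redirect datapath consumes this lookup,
 * modeled loosely on the devmap path in net/core/filter.c (error handling
 * elided; an assumption, not a verbatim copy of that code):
 *
 *	rcu_read_lock();	// held by the driver around the XDP program
 *	obj = __dev_map_lookup_elem(map, index);
 *	if (obj) {
 *		dev_map_enqueue(obj, xdp, dev_rx);
 *		__dev_map_insert_ctx(map, index);
 *	}
 *	rcu_read_unlock();
 */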

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(obj, bq);

	/* The ingress dev_rx will be the same for all xdp_frames in the
	 * bulk queue, because the queue is stored per-CPU and must be
	 * flushed from the net_device driver's NAPI handler before it
	 * returns.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;
	return 0;
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_flush) {
		struct net_device *fl = dev->dev;
		struct xdp_bulk_queue *bq;
		unsigned long *bitmap;
		int cpu;

		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(dev, bq);

			fl->netdev_ops->ndo_xdp_flush(dev->dev);
		}
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed; this does not guarantee a flush has happened yet,
	 * because the driver side rcu_read_lock/unlock only protects the
	 * running XDP program. For pending flush operations the dev and
	 * ctx are stored in another per cpu map. Additionally, the driver
	 * tear down ensures all soft irqs are complete before the net
	 * device is removed once its refcount drops to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
						sizeof(void *), gfp);
		if (!dev->bulkq) {
			kfree(dev);
			return -ENOMEM;
		}

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			free_percpu(dev->bulkq);
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
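
/* From user space, the update path is an ordinary bpf(2) map update: the
 * key is a slot index and the value an ifindex. An illustrative
 * libbpf-style example (map_fd and the device name are assumptions):
 *
 *	__u32 key = 0;
 *	__u32 ifindex = if_nametoindex("eth0");
 *
 *	if (bpf_map_update_elem(map_fd, &key, &ifindex, 0))
 *		perror("bpf_map_update_elem");
 */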

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
};

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev ||
				    dev->dev->ifindex != netdev->ifindex)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);