// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * have spent some effort to ensure the datapath with redirect maps does not
 * use any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver-side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an RCU grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed
 * until this list is empty, indicating outstanding flush operations have
 * completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search for net devices in the map structure that
 * contain a reference to the net device and remove them. This is a two-step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check to see if the ifindex is the same as the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as the key
 * to be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion are different.
 */
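
/* For orientation, a minimal sketch of the BPF side that drives this map
 * (illustrative only, not part of this file; assumes libbpf-style SEC()
 * macros, and the map name "tx_ports" and key choice are made up):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_ports, 0, 0);
 *	}
 */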
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct rcu_head rcu;
	unsigned int idx;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}
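
/* Illustrative arithmetic for the mask above: with max_entries = 100,
 * dev_map_init_map() rounds n_buckets up to 128, so an ifindex key of 300
 * lands in bucket 300 & 127 = 44. Power-of-two bucket counts make this a
 * single AND instead of a modulo.
 */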

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u64 cost = 0;
	int err;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
	} else {
		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	}

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory, cost);
	if (err)
		return -EINVAL;

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
		if (!dtab->dev_index_head)
			goto free_charge;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			goto free_charge;
	}

	return 0;

free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
	return -ENOMEM;
}
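
/* A sketch of creating such a map from userspace (illustrative; assumes
 * libbpf's bpf_create_map() from this era -- key and value are both 4
 * bytes, matching the sanity checks above):
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(__u32),
 *				    sizeof(__u32), 64, 0);
 *	if (map_fd < 0)
 *		return -1;
 */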

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) were
	 * disconnected from events. The following synchronize_rcu() guarantees
	 * that both rcu read critical sections complete and waits for
	 * preempt-disable regions (NAPI being the relevant context here) so we
	 * are certain there will be no further reads against the netdev_map and
	 * all flush operations are complete. Flush operations can only be done
	 * from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		kfree(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = dtab->netdev_map[i];
			if (!dev)
				continue;

			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
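
/* Userspace can walk the map with the usual get_next_key protocol; a brief
 * sketch (error handling elided, and map_fd is an assumption):
 *
 *	__u32 key, next_key;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next_key);
 *
 *	while (!err) {
 *		key = next_key;
 *		...look up or update "key" here...
 *		err = bpf_map_get_next_key(map_fd, &key, &next_key);
 *	}
 */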

struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}
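
/* The hash variant of get_next_key below first resumes within the current
 * entry's bucket chain; once a chain is exhausted it falls through to
 * find_first and scans the remaining buckets in index order.
 */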
static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
 * from the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing, so that all flush operations have finished.
 * When drivers update the bpf program they may need to ensure any flush ops
 * are also complete. Using synchronize_rcu or call_rcu will suffice for this
 * because both wait for napi context to exit.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
}
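
/* A hedged sketch of the driver contract described above (hypothetical
 * driver names, illustrative only): redirects queue frames via bq_enqueue()
 * during RX processing, and the poll routine kicks the flush before
 * returning:
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = mydrv_clean_rx_irq(napi, budget);
 *
 *		xdp_do_flush();
 *		return work;
 *	}
 */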

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here, a dev_put won't happen until after reading
 * the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI function.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

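	/* flush_node.prev doubles as the "already queued" marker: list_add()
	 * sets it, and __list_del_clearprev() in bq_xmit_all() resets it to
	 * NULL, so each bulk queue appears on the flush list at most once.
	 */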
	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
				struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dev, xdpf, dev_rx);
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	return __xdp_enqueue(dev, xdp, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed as well as any flush operations, because call_rcu
	 * will wait for the preempt-disable region to complete (NAPI in
	 * this context). Additionally, the driver teardown ensures all
	 * soft irqs are complete before removing the net device in the
	 * case where dev_put reaches zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    u32 ifindex,
						    unsigned int idx)
{
	struct bpf_dtab_netdev *dev;

	dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
			   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, ifindex);
	if (!dev->dev) {
		kfree(dev);
		return ERR_PTR(-EINVAL);
	}

	dev->idx = idx;
	dev->dtab = dtab;

	return dev;
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, ifindex, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver-side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
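
/* The matching userspace update, sketched (map_fd and the device name are
 * assumptions; writing an ifindex of 0 clears a slot, per the code above):
 *
 *	__u32 key = 0;
 *	__u32 ifindex = if_nametoindex("eth0");
 *
 *	bpf_map_update_elem(map_fd, &key, &ifindex, BPF_ANY);
 */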

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	if (unlikely(map_flags > BPF_EXIST || !ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, ifindex, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq =
			__alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
					   sizeof(void *), GFP_ATOMIC);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);