// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the netdev_map
 * array (read-only) from an RCU critical section, we use call_rcu() to wait for
 * an RCU grace period before freeing the old data structures. This ensures the
 * datapath always has a valid copy. However, the datapath does a "flush"
 * operation that pushes any pending packets in the driver outside the RCU
 * critical section. Each bpf_dtab_netdev tracks these pending operations using
 * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
 * this list is empty, indicating outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search for net devices in the map structure that
 * contain a reference to the net device and remove them. This is a two-step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check to see if the ifindex is the same as the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map, we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to be
 * densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only lookup and insertion differ.
 */
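
/* For illustration, a minimal BPF-side sketch (not part of this file) of the
 * bpf_redirect_map() usage described above. The map name "tx_ports" and the
 * fixed key are hypothetical, and a libbpf BTF-style map definition is
 * assumed:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;	// slot chosen by user space
 *
 *		return bpf_redirect_map(&tx_ports, key, 0);
 *	}
 *
 * On success bpf_redirect_map() returns XDP_REDIRECT, and the frame is later
 * queued through bq_enqueue() below and sent when the driver flushes at the
 * end of its NAPI poll.
 */
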
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	struct bpf_prog *xdp_prog;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* check sanity of attributes. 2 value sizes supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
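	/* For reference, the UAPI value layout checked here is struct
	 * bpf_devmap_val from include/uapi/linux/bpf.h:
	 *
	 *	struct bpf_devmap_val {
	 *		__u32 ifindex;   // device index
	 *		union {
	 *			int   fd;  // prog fd on map write
	 *			__u32 id;  // prog id on map read
	 *		} bpf_prog;
	 *	};
	 *
	 * so the two permitted value sizes are offsetofend(..., ifindex) == 4
	 * and offsetofend(..., bpf_prog.fd) == 8.
	 */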
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
	}

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER | __GFP_ACCOUNT);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) have
	 * been disconnected from events. The following synchronize_rcu()
	 * guarantees both rcu read critical sections complete and waits for
	 * preempt-disable regions (NAPI being the relevant context here) so we
	 * are certain there will be no further reads against the netdev_map and
	 * all flush operations are complete. Flush operations can only be done
	 * from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = rcu_dereference_raw(dtab->netdev_map[i]);
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
				struct xdp_frame **frames, int n,
				struct net_device *dev)
{
	struct xdp_txq_info txq = { .dev = dev };
	struct xdp_buff xdp;
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		xdp_convert_frame_to_buff(xdpf, &xdp);
		xdp.txq = &txq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (unlikely(err < 0))
				xdp_return_frame_rx_napi(xdpf);
			else
				frames[nframes++] = xdpf;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame_rx_napi(xdpf);
			break;
		}
	}
	return nframes; /* sent frames count */
}

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	unsigned int cnt = bq->count;
	int sent = 0, err = 0;
	int to_send = cnt;
	int i;

	if (unlikely(!cnt))
		return;

	for (i = 0; i < cnt; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	if (bq->xdp_prog) {
		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
		if (!to_send)
			goto out;
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been xmit'ed.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them
	 */
	for (i = sent; unlikely(i < to_send); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

out:
	bq->count = 0;
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}

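/* For context: bq_xmit_all() above relies on the ndo_xdp_xmit() contract from
 * include/linux/netdevice.h,
 *
 *	int (*ndo_xdp_xmit)(struct net_device *dev, int n,
 *			    struct xdp_frame **xdp, u32 flags);
 *
 * which returns the number of frames transmitted, or a negative errno if none
 * were. Frames q[0..sent-1] are then owned by the driver, while the unsent
 * q[sent..to_send-1] must be freed by the caller, as done above.
 */
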
/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 * driver before returning from its napi->poll() routine. See the comment above
 * xdp_do_flush() in filter.c.
 */
void __dev_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
		bq->dev_rx = NULL;
		bq->xdp_prog = NULL;
		__list_del_clearprev(&bq->flush_node);
	}
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = rcu_dereference_check(dtab->netdev_map[key],
				    rcu_read_lock_bh_held());
	return obj;
}

/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, safe percpu
 * variable access, and map elements stick around. See comment above
 * xdp_do_flush() in filter.c.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frame's in
	 * bulk_queue, because bq is stored per-CPU and must be flushed
	 * from the net_device driver's NAPI func end.
	 *
	 * Do the same with xdp_prog and flush_list since these fields
	 * are only ever modified together.
	 */
	if (!bq->dev_rx) {
		bq->dev_rx = dev_rx;
		bq->xdp_prog = xdp_prog;
		list_add(&bq->flush_node, flush_list);
	}

	bq->q[bq->count++] = xdpf;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
				struct net_device *dev_rx,
				struct bpf_prog *xdp_prog)
{
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdpf->len);
	if (unlikely(err))
		return err;

	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
	return 0;
}

static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
{
	struct xdp_txq_info txq = { .dev = dst->dev };
	struct xdp_buff xdp;
	u32 act;

	if (!dst->xdp_prog)
		return XDP_PASS;

	__skb_pull(skb, skb->mac_len);
	xdp.txq = &txq;

	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
	switch (act) {
	case XDP_PASS:
		__skb_push(skb, skb->mac_len);
		break;
	default:
		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		kfree_skb(skb);
		break;
	}

	return act;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
}

static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
	if (!obj || !obj->dev->netdev_ops->ndo_xdp_xmit)
		return false;

	if (xdp_ok_fwd_dev(obj->dev, xdpf->len))
		return false;

	return true;
}

static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
				 struct net_device *dev_rx,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *nxdpf;

	nxdpf = xdpf_clone(xdpf);
	if (!nxdpf)
		return -ENOMEM;

	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);

	return 0;
}

static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
{
	while (num_excluded--) {
		if (ifindex == excluded[num_excluded])
			return true;
	}
	return false;
}

/* Get ifindex of each upper device. 'indexes' must be able to hold at
 * least MAX_NEST_DEV elements.
 * Returns the number of ifindexes added.
 */
static int get_upper_ifindexes(struct net_device *dev, int *indexes)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		indexes[n++] = upper->ifindex;
	}
	return n;
}

int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
		excluded_devices[num_excluded++] = dev_rx->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!is_valid_dst(dst, xdpf))
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_rcu(dst, head, index_hlist,
						 lockdep_is_held(&dtab->index_lock)) {
				if (!is_valid_dst(dst, xdpf))
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the last copy of the frame */
	if (last_dst)
		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
	else
		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */

	return 0;
}

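/* A minimal sketch of the BPF-side call that reaches the *_multi functions
 * here (the map name is hypothetical): passing BPF_F_BROADCAST makes
 * bpf_redirect_map() ignore the key and send a clone of the frame to every
 * device in the map, while BPF_F_EXCLUDE_INGRESS additionally skips the
 * receiving device:
 *
 *	return bpf_redirect_map(&tx_ports, 0,
 *				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 */
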
Toshiaki Makita | 6d5fc19 | 2018-06-14 11:07:42 +0900 | [diff] [blame] | 658 | int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, |
| 659 | struct bpf_prog *xdp_prog) |
| 660 | { |
| 661 | int err; |
| 662 | |
Toshiaki Makita | d8d7218 | 2018-07-06 11:49:00 +0900 | [diff] [blame] | 663 | err = xdp_ok_fwd_dev(dst->dev, skb->len); |
Toshiaki Makita | 6d5fc19 | 2018-06-14 11:07:42 +0900 | [diff] [blame] | 664 | if (unlikely(err)) |
| 665 | return err; |
Kumar Kartikeya Dwivedi | 2ea5eab | 2021-07-02 16:48:24 +0530 | [diff] [blame] | 666 | |
| 667 | /* Redirect has already succeeded semantically at this point, so we just |
| 668 | * return 0 even if packet is dropped. Helper below takes care of |
| 669 | * freeing skb. |
| 670 | */ |
| 671 | if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS) |
| 672 | return 0; |
| 673 | |
Toshiaki Makita | 6d5fc19 | 2018-06-14 11:07:42 +0900 | [diff] [blame] | 674 | skb->dev = dst->dev; |
| 675 | generic_xdp_tx(skb, xdp_prog); |
| 676 | |
| 677 | return 0; |
| 678 | } |
| 679 | |
Hangbin Liu | e624d4e | 2021-05-19 17:07:45 +0800 | [diff] [blame] | 680 | static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst, |
| 681 | struct sk_buff *skb, |
| 682 | struct bpf_prog *xdp_prog) |
| 683 | { |
| 684 | struct sk_buff *nskb; |
| 685 | int err; |
| 686 | |
| 687 | nskb = skb_clone(skb, GFP_ATOMIC); |
| 688 | if (!nskb) |
| 689 | return -ENOMEM; |
| 690 | |
| 691 | err = dev_map_generic_redirect(dst, nskb, xdp_prog); |
| 692 | if (unlikely(err)) { |
| 693 | consume_skb(nskb); |
| 694 | return err; |
| 695 | } |
| 696 | |
| 697 | return 0; |
| 698 | } |
| 699 | |
| 700 | int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, |
| 701 | struct bpf_prog *xdp_prog, struct bpf_map *map, |
| 702 | bool exclude_ingress) |
| 703 | { |
| 704 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); |
Hangbin Liu | e624d4e | 2021-05-19 17:07:45 +0800 | [diff] [blame] | 705 | struct bpf_dtab_netdev *dst, *last_dst = NULL; |
Jussi Maki | aeea1b8 | 2021-07-31 05:57:35 +0000 | [diff] [blame] | 706 | int excluded_devices[1+MAX_NEST_DEV]; |
Hangbin Liu | e624d4e | 2021-05-19 17:07:45 +0800 | [diff] [blame] | 707 | struct hlist_head *head; |
| 708 | struct hlist_node *next; |
Jussi Maki | aeea1b8 | 2021-07-31 05:57:35 +0000 | [diff] [blame] | 709 | int num_excluded = 0; |
Hangbin Liu | e624d4e | 2021-05-19 17:07:45 +0800 | [diff] [blame] | 710 | unsigned int i; |
| 711 | int err; |
| 712 | |
Jussi Maki | aeea1b8 | 2021-07-31 05:57:35 +0000 | [diff] [blame] | 713 | if (exclude_ingress) { |
| 714 | num_excluded = get_upper_ifindexes(dev, excluded_devices); |
| 715 | excluded_devices[num_excluded++] = dev->ifindex; |
| 716 | } |
| 717 | |
Hangbin Liu | e624d4e | 2021-05-19 17:07:45 +0800 | [diff] [blame] | 718 | if (map->map_type == BPF_MAP_TYPE_DEVMAP) { |
| 719 | for (i = 0; i < map->max_entries; i++) { |
Toke Høiland-Jørgensen | 0fc4dcc | 2021-06-29 11:39:07 +0200 | [diff] [blame] | 720 | dst = rcu_dereference_check(dtab->netdev_map[i], |
| 721 | rcu_read_lock_bh_held()); |
Jussi Maki | aeea1b8 | 2021-07-31 05:57:35 +0000 | [diff] [blame] | 722 | if (!dst) |
| 723 | continue; |
| 724 | |
| 725 | if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex)) |
Hangbin Liu | e624d4e | 2021-05-19 17:07:45 +0800 | [diff] [blame] | 726 | continue; |
| 727 | |
| 728 | /* we only need n-1 clones; last_dst enqueued below */ |
| 729 | if (!last_dst) { |
| 730 | last_dst = dst; |
| 731 | continue; |
| 732 | } |
| 733 | |
| 734 | err = dev_map_redirect_clone(last_dst, skb, xdp_prog); |
| 735 | if (err) |
| 736 | return err; |
| 737 | |
| 738 | last_dst = dst; |
Jussi Maki | aeea1b8 | 2021-07-31 05:57:35 +0000 | [diff] [blame] | 739 | |
Hangbin Liu | e624d4e | 2021-05-19 17:07:45 +0800 | [diff] [blame] | 740 | } |
| 741 | } else { /* BPF_MAP_TYPE_DEVMAP_HASH */ |
| 742 | for (i = 0; i < dtab->n_buckets; i++) { |
| 743 | head = dev_map_index_hash(dtab, i); |
| 744 | hlist_for_each_entry_safe(dst, next, head, index_hlist) { |
Jussi Maki | aeea1b8 | 2021-07-31 05:57:35 +0000 | [diff] [blame] | 745 | if (!dst) |
| 746 | continue; |
| 747 | |
| 748 | if (is_ifindex_excluded(excluded_devices, num_excluded, |
| 749 | dst->dev->ifindex)) |
Hangbin Liu | e624d4e | 2021-05-19 17:07:45 +0800 | [diff] [blame] | 750 | continue; |
| 751 | |
| 752 | /* we only need n-1 clones; last_dst enqueued below */ |
| 753 | if (!last_dst) { |
| 754 | last_dst = dst; |
| 755 | continue; |
| 756 | } |
| 757 | |
| 758 | err = dev_map_redirect_clone(last_dst, skb, xdp_prog); |
| 759 | if (err) |
| 760 | return err; |
| 761 | |
| 762 | last_dst = dst; |
| 763 | } |
| 764 | } |
| 765 | } |
| 766 | |
| 767 | /* consume the first skb and return */ |
| 768 | if (last_dst) |
| 769 | return dev_map_generic_redirect(last_dst, skb, xdp_prog); |
| 770 | |
| 771 | /* dtab is empty */ |
| 772 | consume_skb(skb); |
| 773 | return 0; |
| 774 | } |
| 775 | |
Daniel Borkmann | af4d045 | 2017-08-23 01:47:54 +0200 | [diff] [blame] | 776 | static void *dev_map_lookup_elem(struct bpf_map *map, void *key) |
John Fastabend | 11393cc | 2017-07-17 09:29:40 -0700 | [diff] [blame] | 777 | { |
Jesper Dangaard Brouer | 67f29e0 | 2018-05-24 16:45:46 +0200 | [diff] [blame] | 778 | struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key); |
Daniel Borkmann | af4d045 | 2017-08-23 01:47:54 +0200 | [diff] [blame] | 779 | |
David Ahern | 7f1c042 | 2020-05-29 16:07:12 -0600 | [diff] [blame] | 780 | return obj ? &obj->val : NULL; |
Daniel Borkmann | af4d045 | 2017-08-23 01:47:54 +0200 | [diff] [blame] | 781 | } |
| 782 | |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 783 | static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key) |
| 784 | { |
| 785 | struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map, |
| 786 | *(u32 *)key); |
David Ahern | 7f1c042 | 2020-05-29 16:07:12 -0600 | [diff] [blame] | 787 | return obj ? &obj->val : NULL; |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 788 | } |
| 789 | |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 790 | static void __dev_map_entry_free(struct rcu_head *rcu) |
| 791 | { |
Daniel Borkmann | af4d045 | 2017-08-23 01:47:54 +0200 | [diff] [blame] | 792 | struct bpf_dtab_netdev *dev; |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 793 | |
Daniel Borkmann | af4d045 | 2017-08-23 01:47:54 +0200 | [diff] [blame] | 794 | dev = container_of(rcu, struct bpf_dtab_netdev, rcu); |
David Ahern | fbee97f | 2020-05-29 16:07:13 -0600 | [diff] [blame] | 795 | if (dev->xdp_prog) |
| 796 | bpf_prog_put(dev->xdp_prog); |
Daniel Borkmann | af4d045 | 2017-08-23 01:47:54 +0200 | [diff] [blame] | 797 | dev_put(dev->dev); |
| 798 | kfree(dev); |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 799 | } |
| 800 | |
| 801 | static int dev_map_delete_elem(struct bpf_map *map, void *key) |
| 802 | { |
| 803 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); |
| 804 | struct bpf_dtab_netdev *old_dev; |
| 805 | int k = *(u32 *)key; |
| 806 | |
| 807 | if (k >= map->max_entries) |
| 808 | return -EINVAL; |
| 809 | |
Toke Høiland-Jørgensen | 782347b | 2021-06-24 18:05:55 +0200 | [diff] [blame] | 810 | old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL)); |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 811 | if (old_dev) |
| 812 | call_rcu(&old_dev->rcu, __dev_map_entry_free); |
| 813 | return 0; |
| 814 | } |
| 815 | |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 816 | static int dev_map_hash_delete_elem(struct bpf_map *map, void *key) |
| 817 | { |
| 818 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); |
| 819 | struct bpf_dtab_netdev *old_dev; |
| 820 | int k = *(u32 *)key; |
| 821 | unsigned long flags; |
| 822 | int ret = -ENOENT; |
| 823 | |
| 824 | spin_lock_irqsave(&dtab->index_lock, flags); |
| 825 | |
| 826 | old_dev = __dev_map_hash_lookup_elem(map, k); |
| 827 | if (old_dev) { |
| 828 | dtab->items--; |
| 829 | hlist_del_init_rcu(&old_dev->index_hlist); |
| 830 | call_rcu(&old_dev->rcu, __dev_map_entry_free); |
| 831 | ret = 0; |
| 832 | } |
| 833 | spin_unlock_irqrestore(&dtab->index_lock, flags); |
| 834 | |
| 835 | return ret; |
| 836 | } |
| 837 | |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 838 | static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net, |
| 839 | struct bpf_dtab *dtab, |
David Ahern | 7f1c042 | 2020-05-29 16:07:12 -0600 | [diff] [blame] | 840 | struct bpf_devmap_val *val, |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 841 | unsigned int idx) |
| 842 | { |
David Ahern | fbee97f | 2020-05-29 16:07:13 -0600 | [diff] [blame] | 843 | struct bpf_prog *prog = NULL; |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 844 | struct bpf_dtab_netdev *dev; |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 845 | |
Roman Gushchin | 1440290 | 2020-12-01 13:58:37 -0800 | [diff] [blame] | 846 | dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev), |
| 847 | GFP_ATOMIC | __GFP_NOWARN, |
| 848 | dtab->map.numa_node); |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 849 | if (!dev) |
| 850 | return ERR_PTR(-ENOMEM); |
| 851 | |
David Ahern | 7f1c042 | 2020-05-29 16:07:12 -0600 | [diff] [blame] | 852 | dev->dev = dev_get_by_index(net, val->ifindex); |
| 853 | if (!dev->dev) |
| 854 | goto err_out; |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 855 | |
Jesper Dangaard Brouer | 281920b | 2020-06-09 15:31:46 +0200 | [diff] [blame] | 856 | if (val->bpf_prog.fd > 0) { |
David Ahern | fbee97f | 2020-05-29 16:07:13 -0600 | [diff] [blame] | 857 | prog = bpf_prog_get_type_dev(val->bpf_prog.fd, |
| 858 | BPF_PROG_TYPE_XDP, false); |
| 859 | if (IS_ERR(prog)) |
| 860 | goto err_put_dev; |
| 861 | if (prog->expected_attach_type != BPF_XDP_DEVMAP) |
| 862 | goto err_put_prog; |
| 863 | } |
| 864 | |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 865 | dev->idx = idx; |
| 866 | dev->dtab = dtab; |
David Ahern | fbee97f | 2020-05-29 16:07:13 -0600 | [diff] [blame] | 867 | if (prog) { |
| 868 | dev->xdp_prog = prog; |
| 869 | dev->val.bpf_prog.id = prog->aux->id; |
| 870 | } else { |
| 871 | dev->xdp_prog = NULL; |
| 872 | dev->val.bpf_prog.id = 0; |
| 873 | } |
David Ahern | 7f1c042 | 2020-05-29 16:07:12 -0600 | [diff] [blame] | 874 | dev->val.ifindex = val->ifindex; |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 875 | |
| 876 | return dev; |
David Ahern | fbee97f | 2020-05-29 16:07:13 -0600 | [diff] [blame] | 877 | err_put_prog: |
| 878 | bpf_prog_put(prog); |
| 879 | err_put_dev: |
| 880 | dev_put(dev->dev); |
David Ahern | 7f1c042 | 2020-05-29 16:07:12 -0600 | [diff] [blame] | 881 | err_out: |
| 882 | kfree(dev); |
| 883 | return ERR_PTR(-EINVAL); |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 884 | } |
| 885 | |
| 886 | static int __dev_map_update_elem(struct net *net, struct bpf_map *map, |
| 887 | void *key, void *value, u64 map_flags) |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 888 | { |
| 889 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 890 | struct bpf_dtab_netdev *dev, *old_dev; |
Jesper Dangaard Brouer | 281920b | 2020-06-09 15:31:46 +0200 | [diff] [blame] | 891 | struct bpf_devmap_val val = {}; |
Toke Høiland-Jørgensen | d5df283 | 2019-06-28 11:12:34 +0200 | [diff] [blame] | 892 | u32 i = *(u32 *)key; |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 893 | |
| 894 | if (unlikely(map_flags > BPF_EXIST)) |
| 895 | return -EINVAL; |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 896 | if (unlikely(i >= dtab->map.max_entries)) |
| 897 | return -E2BIG; |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 898 | if (unlikely(map_flags == BPF_NOEXIST)) |
| 899 | return -EEXIST; |
| 900 | |
David Ahern | 7f1c042 | 2020-05-29 16:07:12 -0600 | [diff] [blame] | 901 | /* already verified value_size <= sizeof val */ |
| 902 | memcpy(&val, value, map->value_size); |
| 903 | |
| 904 | if (!val.ifindex) { |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 905 | dev = NULL; |
David Ahern | fbee97f | 2020-05-29 16:07:13 -0600 | [diff] [blame] | 906 | /* can not specify fd if ifindex is 0 */ |
Jesper Dangaard Brouer | 281920b | 2020-06-09 15:31:46 +0200 | [diff] [blame] | 907 | if (val.bpf_prog.fd > 0) |
David Ahern | fbee97f | 2020-05-29 16:07:13 -0600 | [diff] [blame] | 908 | return -EINVAL; |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 909 | } else { |
David Ahern | 7f1c042 | 2020-05-29 16:07:12 -0600 | [diff] [blame] | 910 | dev = __dev_map_alloc_node(net, dtab, &val, i); |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 911 | if (IS_ERR(dev)) |
| 912 | return PTR_ERR(dev); |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 913 | } |
| 914 | |
| 915 | /* Use call_rcu() here to ensure rcu critical sections have completed |
| 916 | * Remembering the driver side flush operation will happen before the |
| 917 | * net device is removed. |
| 918 | */ |
Toke Høiland-Jørgensen | 782347b | 2021-06-24 18:05:55 +0200 | [diff] [blame] | 919 | old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev))); |
John Fastabend | 546ac1f | 2017-07-17 09:28:56 -0700 | [diff] [blame] | 920 | if (old_dev) |
| 921 | call_rcu(&old_dev->rcu, __dev_map_entry_free); |
| 922 | |
| 923 | return 0; |
| 924 | } |
| 925 | |
Toke Høiland-Jørgensen | fca16e5 | 2019-07-26 18:06:53 +0200 | [diff] [blame] | 926 | static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, |
| 927 | u64 map_flags) |
| 928 | { |
| 929 | return __dev_map_update_elem(current->nsproxy->net_ns, |
| 930 | map, key, value, map_flags); |
| 931 | } |
| 932 | |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 933 | static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map, |
| 934 | void *key, void *value, u64 map_flags) |
| 935 | { |
| 936 | struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); |
| 937 | struct bpf_dtab_netdev *dev, *old_dev; |
Jesper Dangaard Brouer | 281920b | 2020-06-09 15:31:46 +0200 | [diff] [blame] | 938 | struct bpf_devmap_val val = {}; |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 939 | u32 idx = *(u32 *)key; |
| 940 | unsigned long flags; |
Toke Høiland-Jørgensen | af58e7e | 2019-09-08 09:20:16 +0100 | [diff] [blame] | 941 | int err = -EEXIST; |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 942 | |
David Ahern | 7f1c042 | 2020-05-29 16:07:12 -0600 | [diff] [blame] | 943 | /* already verified value_size <= sizeof val */ |
| 944 | memcpy(&val, value, map->value_size); |
| 945 | |
| 946 | if (unlikely(map_flags > BPF_EXIST || !val.ifindex)) |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 947 | return -EINVAL; |
| 948 | |
Toke Høiland-Jørgensen | af58e7e | 2019-09-08 09:20:16 +0100 | [diff] [blame] | 949 | spin_lock_irqsave(&dtab->index_lock, flags); |
| 950 | |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 951 | old_dev = __dev_map_hash_lookup_elem(map, idx); |
| 952 | if (old_dev && (map_flags & BPF_NOEXIST)) |
Toke Høiland-Jørgensen | af58e7e | 2019-09-08 09:20:16 +0100 | [diff] [blame] | 953 | goto out_err; |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 954 | |
David Ahern | 7f1c042 | 2020-05-29 16:07:12 -0600 | [diff] [blame] | 955 | dev = __dev_map_alloc_node(net, dtab, &val, idx); |
Toke Høiland-Jørgensen | af58e7e | 2019-09-08 09:20:16 +0100 | [diff] [blame] | 956 | if (IS_ERR(dev)) { |
| 957 | err = PTR_ERR(dev); |
| 958 | goto out_err; |
| 959 | } |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 960 | |
| 961 | if (old_dev) { |
| 962 | hlist_del_rcu(&old_dev->index_hlist); |
| 963 | } else { |
| 964 | if (dtab->items >= dtab->map.max_entries) { |
| 965 | spin_unlock_irqrestore(&dtab->index_lock, flags); |
| 966 | call_rcu(&dev->rcu, __dev_map_entry_free); |
| 967 | return -E2BIG; |
| 968 | } |
| 969 | dtab->items++; |
| 970 | } |
| 971 | |
| 972 | hlist_add_head_rcu(&dev->index_hlist, |
| 973 | dev_map_index_hash(dtab, idx)); |
| 974 | spin_unlock_irqrestore(&dtab->index_lock, flags); |
| 975 | |
| 976 | if (old_dev) |
| 977 | call_rcu(&old_dev->rcu, __dev_map_entry_free); |
| 978 | |
| 979 | return 0; |
Toke Høiland-Jørgensen | af58e7e | 2019-09-08 09:20:16 +0100 | [diff] [blame] | 980 | |
| 981 | out_err: |
| 982 | spin_unlock_irqrestore(&dtab->index_lock, flags); |
| 983 | return err; |
Toke Høiland-Jørgensen | 6f9d451 | 2019-07-26 18:06:55 +0200 | [diff] [blame] | 984 | } |
| 985 | |
| 986 | static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value, |
| 987 | u64 map_flags) |
| 988 | { |
| 989 | return __dev_map_hash_update_elem(current->nsproxy->net_ns, |
| 990 | map, key, value, map_flags); |
| 991 | } |

static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_lookup_elem);
}

static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_hash_lookup_elem);
}
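
/* Usage sketch (BPF program side, not part of this file): an XDP program that
 * exercises the map_redirect ops above through bpf_redirect_map(). The map
 * name, size and slot 0 are hypothetical; BPF_F_BROADCAST and
 * BPF_F_EXCLUDE_INGRESS are the UAPI flags whitelisted in the calls above.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(max_entries, 16);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		// unicast to the device stored at slot 0; pass
 *		// BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS as the flags
 *		// argument instead of 0 to clone the frame to every entry
 *		// except the ingress device
 *		return bpf_redirect_map(&tx_ports, 0, 0);
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */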

static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_btf_id,
	.map_redirect = dev_map_redirect,
};

static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_dtab",
	.map_btf_id = &dev_map_hash_map_btf_id,
	.map_redirect = dev_hash_map_redirect,
};
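
/* Usage sketch (userspace, not part of this file): creating the two flavours
 * served by the ops tables above, assuming a libbpf recent enough to provide
 * bpf_map_create(). Map names and max_entries are hypothetical; value_size
 * may also be sizeof(struct bpf_devmap_val) to attach a per-entry program fd.
 *
 *	int array_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP, "tx_array",
 *				      sizeof(__u32), sizeof(__u32), 64, NULL);
 *	int hash_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP_HASH, "tx_hash",
 *				     sizeof(__u32), sizeof(__u32), 64, NULL);
 */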

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed both because
		 * dev_map_list is an RCU list and to ensure that a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = rcu_dereference(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	int cpu;

	/* Ensure the tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
	return 0;
}

subsys_initcall(dev_map_init);