// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "socket.h"
#include "timers.h"
#include "device.h"
#include "ratelimiter.h"
#include "peer.h"
#include "messages.h"

#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>

static LIST_HEAD(device_list);

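/* Bringing the interface up disables ICMP redirects and IPv6 link-local
 * address generation on the tunnel, binds the UDP socket to the configured
 * listen port, and flushes each peer's staged packets, sending a keepalive
 * to peers with persistent keepalive configured.
 */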
static int wg_open(struct net_device *dev)
{
        struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
        struct inet6_dev *dev_v6 = __in6_dev_get(dev);
        struct wg_device *wg = netdev_priv(dev);
        struct wg_peer *peer;
        int ret;

        if (dev_v4) {
                /* At some point we might put this check near the ip_rt_send_
                 * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
                 * to the current secpath check.
                 */
                IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
                IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
        }
        if (dev_v6)
                dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;

        mutex_lock(&wg->device_update_lock);
        ret = wg_socket_init(wg, wg->incoming_port);
        if (ret < 0)
                goto out;
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
                wg_packet_send_staged_packets(peer);
                if (peer->persistent_keepalive_interval)
                        wg_packet_send_keepalive(peer);
        }
out:
        mutex_unlock(&wg->device_update_lock);
        return ret;
}

#ifdef CONFIG_PM_SLEEP
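/* On suspend or hibernation, clear all handshake and session key material
 * for every peer, so that no stale keys linger across a sleep of unknown
 * duration. Autosleep and Android opt out, since there suspending is part
 * of normal operation rather than a rare event.
 */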
static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
                              void *data)
{
        struct wg_device *wg;
        struct wg_peer *peer;

        /* If the machine is constantly suspending and resuming, as part of
         * its normal operation rather than as a somewhat rare event, then we
         * don't actually want to clear keys.
         */
        if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
                return 0;

        if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
                return 0;

        rtnl_lock();
        list_for_each_entry(wg, &device_list, device_list) {
                mutex_lock(&wg->device_update_lock);
                list_for_each_entry(peer, &wg->peer_list, peer_list) {
                        del_timer(&peer->timer_zero_key_material);
                        wg_noise_handshake_clear(&peer->handshake);
                        wg_noise_keypairs_clear(&peer->keypairs);
                }
                mutex_unlock(&wg->device_update_lock);
        }
        rtnl_unlock();
        rcu_barrier();
        return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
#endif

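/* Taking the interface down stops all timers, purges staged and queued
 * handshake packets, clears each peer's handshake and session keys, and
 * releases the listening socket.
 */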
static int wg_stop(struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);
        struct wg_peer *peer;

        mutex_lock(&wg->device_update_lock);
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
                wg_packet_purge_staged_packets(peer);
                wg_timers_stop(peer);
                wg_noise_handshake_clear(&peer->handshake);
                wg_noise_keypairs_clear(&peer->keypairs);
                wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
        }
        mutex_unlock(&wg->device_update_lock);
        skb_queue_purge(&wg->incoming_handshakes);
        wg_socket_reinit(wg, NULL, NULL);
        return 0;
}

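/* The transmit path: validate that the skb really is an IP packet, look up
 * the peer whose allowed IPs cover the destination address, segment GSO
 * super-packets, then append everything to that peer's staged packet queue
 * and kick wg_packet_send_staged_packets() to encrypt and send.
 */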
static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);
        struct sk_buff_head packets;
        struct wg_peer *peer;
        struct sk_buff *next;
        sa_family_t family;
        u32 mtu;
        int ret;

        if (unlikely(!wg_check_packet_protocol(skb))) {
                ret = -EPROTONOSUPPORT;
                net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
                goto err;
        }

        peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
        if (unlikely(!peer)) {
                ret = -ENOKEY;
                if (skb->protocol == htons(ETH_P_IP))
                        net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
                                            dev->name, &ip_hdr(skb)->daddr);
                else if (skb->protocol == htons(ETH_P_IPV6))
                        net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
                                            dev->name, &ipv6_hdr(skb)->daddr);
                goto err;
        }

        family = READ_ONCE(peer->endpoint.addr.sa_family);
        if (unlikely(family != AF_INET && family != AF_INET6)) {
                ret = -EDESTADDRREQ;
                net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
                                    dev->name, peer->internal_id);
                goto err_peer;
        }

        mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

        __skb_queue_head_init(&packets);
        if (!skb_is_gso(skb)) {
                skb_mark_not_on_list(skb);
        } else {
                struct sk_buff *segs = skb_gso_segment(skb, 0);

                if (unlikely(IS_ERR(segs))) {
                        ret = PTR_ERR(segs);
                        goto err_peer;
                }
                dev_kfree_skb(skb);
                skb = segs;
        }

        skb_list_walk_safe(skb, skb, next) {
                skb_mark_not_on_list(skb);

                skb = skb_share_check(skb, GFP_ATOMIC);
                if (unlikely(!skb))
                        continue;

                /* We only need to keep the original dst around for icmp,
                 * so at this point we're in a position to drop it.
                 */
                skb_dst_drop(skb);

                PACKET_CB(skb)->mtu = mtu;

                __skb_queue_tail(&packets, skb);
        }

        spin_lock_bh(&peer->staged_packet_queue.lock);
        /* If the queue is getting too big, we start removing the oldest packets
         * until it's small again. We do this before adding the new packet, so
         * we don't remove GSO segments that are in excess.
         */
        while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
                dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
                ++dev->stats.tx_dropped;
        }
        skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);

        wg_packet_send_staged_packets(peer);

        wg_peer_put(peer);
        return NETDEV_TX_OK;

err_peer:
        wg_peer_put(peer);
err:
        ++dev->stats.tx_errors;
        if (skb->protocol == htons(ETH_P_IP))
                icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
        else if (skb->protocol == htons(ETH_P_IPV6))
                icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
        kfree_skb(skb);
        return ret;
}

static const struct net_device_ops netdev_ops = {
        .ndo_open               = wg_open,
        .ndo_stop               = wg_stop,
        .ndo_start_xmit         = wg_xmit,
        .ndo_get_stats64        = dev_get_tstats64
};

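/* Final teardown, run as the netdev's priv_destructor once the last
 * reference to the device is dropped: remove all peers, destroy the
 * workqueues and packet queues, and zero the static identity keys.
 */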
static void wg_destruct(struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);

        rtnl_lock();
        list_del(&wg->device_list);
        rtnl_unlock();
        mutex_lock(&wg->device_update_lock);
        rcu_assign_pointer(wg->creating_net, NULL);
        wg->incoming_port = 0;
        wg_socket_reinit(wg, NULL, NULL);
        /* The final references are cleared in the below calls to destroy_workqueue. */
        wg_peer_remove_all(wg);
        destroy_workqueue(wg->handshake_receive_wq);
        destroy_workqueue(wg->handshake_send_wq);
        destroy_workqueue(wg->packet_crypt_wq);
        wg_packet_queue_free(&wg->decrypt_queue, true);
        wg_packet_queue_free(&wg->encrypt_queue, true);
        rcu_barrier(); /* Wait for all the peers to be actually freed. */
        wg_ratelimiter_uninit();
        memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
        skb_queue_purge(&wg->incoming_handshakes);
        free_percpu(dev->tstats);
        free_percpu(wg->incoming_handshakes_worker);
        kvfree(wg->index_hashtable);
        kvfree(wg->peer_hashtable);
        mutex_unlock(&wg->device_update_lock);

        pr_debug("%s: Interface destroyed\n", dev->name);
        free_netdev(dev);
}

static const struct device_type device_type = { .name = KBUILD_MODNAME };

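/* One-time netdev initialization: WireGuard presents itself as a layer 3
 * point-to-point device with no hardware header, and its default and
 * maximum MTU account for the message and UDP/IP encapsulation overhead
 * computed below.
 */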
static void wg_setup(struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);
        enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
                                    NETIF_F_SG | NETIF_F_GSO |
                                    NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
        const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
                             max(sizeof(struct ipv6hdr), sizeof(struct iphdr));

        dev->netdev_ops = &netdev_ops;
        dev->header_ops = &ip_tunnel_header_ops;
        dev->hard_header_len = 0;
        dev->addr_len = 0;
        dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
        dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
        dev->type = ARPHRD_NONE;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->features |= NETIF_F_LLTX;
        dev->features |= WG_NETDEV_FEATURES;
        dev->hw_features |= WG_NETDEV_FEATURES;
        dev->hw_enc_features |= WG_NETDEV_FEATURES;
        dev->mtu = ETH_DATA_LEN - overhead;
        dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;

        SET_NETDEV_DEVTYPE(dev, &device_type);

        /* We need to keep the dst around in case of icmp replies. */
        netif_keep_dst(dev);

        memset(wg, 0, sizeof(*wg));
        wg->dev = dev;
}

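/* Instantiate a new interface: allocate the hashtables, per-cpu handshake
 * workers, workqueues, and crypt queues, then register the netdev. Each
 * error label unwinds exactly the allocations that preceded it.
 */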
static int wg_newlink(struct net *src_net, struct net_device *dev,
                      struct nlattr *tb[], struct nlattr *data[],
                      struct netlink_ext_ack *extack)
{
        struct wg_device *wg = netdev_priv(dev);
        int ret = -ENOMEM;

        rcu_assign_pointer(wg->creating_net, src_net);
        init_rwsem(&wg->static_identity.lock);
        mutex_init(&wg->socket_update_lock);
        mutex_init(&wg->device_update_lock);
        skb_queue_head_init(&wg->incoming_handshakes);
        wg_allowedips_init(&wg->peer_allowedips);
        wg_cookie_checker_init(&wg->cookie_checker, wg);
        INIT_LIST_HEAD(&wg->peer_list);
        wg->device_update_gen = 1;

        wg->peer_hashtable = wg_pubkey_hashtable_alloc();
        if (!wg->peer_hashtable)
                return ret;

        wg->index_hashtable = wg_index_hashtable_alloc();
        if (!wg->index_hashtable)
                goto err_free_peer_hashtable;

        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                goto err_free_index_hashtable;

        wg->incoming_handshakes_worker =
                wg_packet_percpu_multicore_worker_alloc(
                                wg_packet_handshake_receive_worker, wg);
        if (!wg->incoming_handshakes_worker)
                goto err_free_tstats;

        wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
                        WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
        if (!wg->handshake_receive_wq)
                goto err_free_incoming_handshakes;

        wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
                        WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
        if (!wg->handshake_send_wq)
                goto err_destroy_handshake_receive;

        wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
                        WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
        if (!wg->packet_crypt_wq)
                goto err_destroy_handshake_send;

        ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
                                   true, MAX_QUEUED_PACKETS);
        if (ret < 0)
                goto err_destroy_packet_crypt;

        ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
                                   true, MAX_QUEUED_PACKETS);
        if (ret < 0)
                goto err_free_encrypt_queue;

        ret = wg_ratelimiter_init();
        if (ret < 0)
                goto err_free_decrypt_queue;

        ret = register_netdevice(dev);
        if (ret < 0)
                goto err_uninit_ratelimiter;

        list_add(&wg->device_list, &device_list);

        /* We wait until the end to assign priv_destructor, so that
         * register_netdevice doesn't call it for us if it fails.
         */
        dev->priv_destructor = wg_destruct;

        pr_debug("%s: Interface created\n", dev->name);
        return ret;

err_uninit_ratelimiter:
        wg_ratelimiter_uninit();
err_free_decrypt_queue:
        wg_packet_queue_free(&wg->decrypt_queue, true);
err_free_encrypt_queue:
        wg_packet_queue_free(&wg->encrypt_queue, true);
err_destroy_packet_crypt:
        destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
        destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
        destroy_workqueue(wg->handshake_receive_wq);
err_free_incoming_handshakes:
        free_percpu(wg->incoming_handshakes_worker);
err_free_tstats:
        free_percpu(dev->tstats);
err_free_index_hashtable:
        kvfree(wg->index_hashtable);
err_free_peer_hashtable:
        kvfree(wg->peer_hashtable);
        return ret;
}

static struct rtnl_link_ops link_ops __read_mostly = {
        .kind                   = KBUILD_MODNAME,
        .priv_size              = sizeof(struct wg_device),
        .setup                  = wg_setup,
        .newlink                = wg_newlink,
};

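/* The UDP socket lives in the namespace the device was created in, which
 * may differ from where the device currently resides. When that creating
 * namespace exits, drop our reference to it and release the socket so the
 * namespace can actually go away.
 */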
static void wg_netns_pre_exit(struct net *net)
{
        struct wg_device *wg;

        rtnl_lock();
        list_for_each_entry(wg, &device_list, device_list) {
                if (rcu_access_pointer(wg->creating_net) == net) {
                        pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
                        netif_carrier_off(wg->dev);
                        mutex_lock(&wg->device_update_lock);
                        rcu_assign_pointer(wg->creating_net, NULL);
                        wg_socket_reinit(wg, NULL, NULL);
                        mutex_unlock(&wg->device_update_lock);
                }
        }
        rtnl_unlock();
}

static struct pernet_operations pernet_ops = {
        .pre_exit = wg_netns_pre_exit
};

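/* Module init: register the PM notifier, pernet operations, and rtnl link
 * ops, unwinding in reverse order on failure.
 */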
int __init wg_device_init(void)
{
        int ret;

#ifdef CONFIG_PM_SLEEP
        ret = register_pm_notifier(&pm_notifier);
        if (ret)
                return ret;
#endif

        ret = register_pernet_device(&pernet_ops);
        if (ret)
                goto error_pm;

        ret = rtnl_link_register(&link_ops);
        if (ret)
                goto error_pernet;

        return 0;

error_pernet:
        unregister_pernet_device(&pernet_ops);
error_pm:
#ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&pm_notifier);
#endif
        return ret;
}

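/* Module exit: unregister in the reverse order of wg_device_init() and
 * wait for outstanding RCU callbacks before the text goes away.
 */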
void wg_device_uninit(void)
{
        rtnl_link_unregister(&link_ops);
        unregister_pernet_device(&pernet_ops);
#ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&pm_notifier);
#endif
        rcu_barrier();
}