// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "socket.h"
#include "timers.h"
#include "device.h"
#include "ratelimiter.h"
#include "peer.h"
#include "messages.h"

#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>

static LIST_HEAD(device_list);

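/* ndo_open handler. Disables IPv4 ICMP redirects and automatic IPv6
 * link-local address generation on the interface, brings up the UDP socket
 * on the configured listen port, and flushes any packets staged while the
 * interface was down, sending an immediate keepalive for peers that have
 * one configured.
 */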
static int wg_open(struct net_device *dev)
{
	struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
	struct inet6_dev *dev_v6 = __in6_dev_get(dev);
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;
	int ret;

	if (dev_v4) {
		/* At some point we might put this check near the ip_rt_send_
		 * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
		 * to the current secpath check.
		 */
		IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
		IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
	}
	if (dev_v6)
		dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;

	ret = wg_socket_init(wg, wg->incoming_port);
	if (ret < 0)
		return ret;
	mutex_lock(&wg->device_update_lock);
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_send_staged_packets(peer);
		if (peer->persistent_keepalive_interval)
			wg_packet_send_keepalive(peer);
	}
	mutex_unlock(&wg->device_update_lock);
	return 0;
}

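/* When the machine suspends or hibernates, handshake state and session keys
 * are cleared for every peer of every device, the idea being that key
 * material should not sit in memory across a potentially long sleep. Systems
 * that suspend routinely as part of normal operation (CONFIG_PM_AUTOSLEEP,
 * CONFIG_ANDROID) are exempted in the handler below.
 */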
#ifdef CONFIG_PM_SLEEP
static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct wg_device *wg;
	struct wg_peer *peer;

	/* If the machine is constantly suspending and resuming, as part of
	 * its normal operation rather than as a somewhat rare event, then we
	 * don't actually want to clear keys.
	 */
	if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
		return 0;

	if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
		return 0;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		mutex_lock(&wg->device_update_lock);
		list_for_each_entry(peer, &wg->peer_list, peer_list) {
			del_timer(&peer->timer_zero_key_material);
			wg_noise_handshake_clear(&peer->handshake);
			wg_noise_keypairs_clear(&peer->keypairs);
		}
		mutex_unlock(&wg->device_update_lock);
	}
	rtnl_unlock();
	rcu_barrier();
	return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
#endif

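/* ndo_stop handler: the inverse of wg_open(). Purges staged and queued
 * packets, stops the per-peer timers, clears handshake and session state for
 * every peer, and tears down the UDP socket.
 */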
static int wg_stop(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;

	mutex_lock(&wg->device_update_lock);
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_purge_staged_packets(peer);
		wg_timers_stop(peer);
		wg_noise_handshake_clear(&peer->handshake);
		wg_noise_keypairs_clear(&peer->keypairs);
		wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	}
	mutex_unlock(&wg->device_update_lock);
	skb_queue_purge(&wg->incoming_handshakes);
	wg_socket_reinit(wg, NULL, NULL);
	return 0;
}

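/* ndo_start_xmit handler: the transmit path. Looks up the destination peer
 * in the allowed-IPs trie, segments GSO superpackets, stages the resulting
 * skbs on the peer's queue, and kicks off encryption and sending. On error,
 * an ICMP/ICMPv6 unreachable message is generated, which is why the original
 * dst is kept around until this point (see netif_keep_dst() in wg_setup()).
 */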
static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct sk_buff_head packets;
	struct wg_peer *peer;
	struct sk_buff *next;
	sa_family_t family;
	u32 mtu;
	int ret;

	if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) {
		ret = -EPROTONOSUPPORT;
		net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
		goto err;
	}

	peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
	if (unlikely(!peer)) {
		ret = -ENOKEY;
		if (skb->protocol == htons(ETH_P_IP))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
					    dev->name, &ip_hdr(skb)->daddr);
		else if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
					    dev->name, &ipv6_hdr(skb)->daddr);
		goto err;
	}

	family = READ_ONCE(peer->endpoint.addr.sa_family);
	if (unlikely(family != AF_INET && family != AF_INET6)) {
		ret = -EDESTADDRREQ;
		net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
				    dev->name, peer->internal_id);
		goto err_peer;
	}

	mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	__skb_queue_head_init(&packets);
	if (!skb_is_gso(skb)) {
		skb_mark_not_on_list(skb);
	} else {
		struct sk_buff *segs = skb_gso_segment(skb, 0);

		if (unlikely(IS_ERR(segs))) {
			ret = PTR_ERR(segs);
			goto err_peer;
		}
		dev_kfree_skb(skb);
		skb = segs;
	}

	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			continue;

		/* We only need to keep the original dst around for icmp,
		 * so at this point we're in a position to drop it.
		 */
		skb_dst_drop(skb);

		PACKET_CB(skb)->mtu = mtu;

		__skb_queue_tail(&packets, skb);
	}

	spin_lock_bh(&peer->staged_packet_queue.lock);
	/* If the queue is getting too big, we start removing the oldest packets
	 * until it's small again. We do this before adding the new packet, so
	 * we don't remove GSO segments that are in excess.
	 */
	while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
		dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
		++dev->stats.tx_dropped;
	}
	skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);

	wg_packet_send_staged_packets(peer);

	wg_peer_put(peer);
	return NETDEV_TX_OK;

err_peer:
	wg_peer_put(peer);
err:
	++dev->stats.tx_errors;
	if (skb->protocol == htons(ETH_P_IP))
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
	else if (skb->protocol == htons(ETH_P_IPV6))
		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
	kfree_skb(skb);
	return ret;
}

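/* The ndo callbacks wired up above; stats come from the generic per-cpu
 * tstats counters allocated in wg_newlink().
 */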
static const struct net_device_ops netdev_ops = {
	.ndo_open		= wg_open,
	.ndo_stop		= wg_stop,
	.ndo_start_xmit		= wg_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64
};

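/* priv_destructor, run when the last reference to the netdevice is dropped.
 * Everything allocated in wg_newlink() is freed here, in roughly reverse
 * order, with rcu_barrier() ensuring that RCU-deferred peer frees have
 * completed before the memory behind them goes away.
 */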
static void wg_destruct(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);

	rtnl_lock();
	list_del(&wg->device_list);
	rtnl_unlock();
	mutex_lock(&wg->device_update_lock);
	wg->incoming_port = 0;
	wg_socket_reinit(wg, NULL, NULL);
	/* The final references are cleared in the below calls to destroy_workqueue. */
	wg_peer_remove_all(wg);
	destroy_workqueue(wg->handshake_receive_wq);
	destroy_workqueue(wg->handshake_send_wq);
	destroy_workqueue(wg->packet_crypt_wq);
	wg_packet_queue_free(&wg->decrypt_queue, true);
	wg_packet_queue_free(&wg->encrypt_queue, true);
	rcu_barrier(); /* Wait for all the peers to be actually freed. */
	wg_ratelimiter_uninit();
	memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
	skb_queue_purge(&wg->incoming_handshakes);
	free_percpu(dev->tstats);
	free_percpu(wg->incoming_handshakes_worker);
	if (wg->have_creating_net_ref)
		put_net(wg->creating_net);
	kvfree(wg->index_hashtable);
	kvfree(wg->peer_hashtable);
	mutex_unlock(&wg->device_update_lock);

	pr_debug("%s: Interface deleted\n", dev->name);
	free_netdev(dev);
}

static const struct device_type device_type = { .name = KBUILD_MODNAME };

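/* Default interface parameters. The default MTU works out to 1420 bytes on
 * a standard 1500-byte link: ETH_DATA_LEN (1500) minus MESSAGE_MINIMUM_LENGTH
 * (the 32 bytes of data-message header plus authentication tag) minus the
 * UDP header (8) minus the larger of the IPv4/IPv6 header sizes (40, IPv6).
 */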
static void wg_setup(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				    NETIF_F_SG | NETIF_F_GSO |
				    NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };

	dev->netdev_ops = &netdev_ops;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
	dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->features |= NETIF_F_LLTX;
	dev->features |= WG_NETDEV_FEATURES;
	dev->hw_features |= WG_NETDEV_FEATURES;
	dev->hw_enc_features |= WG_NETDEV_FEATURES;
	dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH -
		   sizeof(struct udphdr) -
		   max(sizeof(struct ipv6hdr), sizeof(struct iphdr));

	SET_NETDEV_DEVTYPE(dev, &device_type);

	/* We need to keep the dst around in case of icmp replies. */
	netif_keep_dst(dev);

	memset(wg, 0, sizeof(*wg));
	wg->dev = dev;
}

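/* rtnl newlink handler, called when an interface of this kind is created
 * (e.g. `ip link add wg0 type wireguard`). All per-device state is allocated
 * here; each failure path unwinds exactly the allocations that preceded it
 * via the error labels at the bottom.
 */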
static int wg_newlink(struct net *src_net, struct net_device *dev,
		      struct nlattr *tb[], struct nlattr *data[],
		      struct netlink_ext_ack *extack)
{
	struct wg_device *wg = netdev_priv(dev);
	int ret = -ENOMEM;

	wg->creating_net = src_net;
	init_rwsem(&wg->static_identity.lock);
	mutex_init(&wg->socket_update_lock);
	mutex_init(&wg->device_update_lock);
	skb_queue_head_init(&wg->incoming_handshakes);
	wg_allowedips_init(&wg->peer_allowedips);
	wg_cookie_checker_init(&wg->cookie_checker, wg);
	INIT_LIST_HEAD(&wg->peer_list);
	wg->device_update_gen = 1;

	wg->peer_hashtable = wg_pubkey_hashtable_alloc();
	if (!wg->peer_hashtable)
		return ret;

	wg->index_hashtable = wg_index_hashtable_alloc();
	if (!wg->index_hashtable)
		goto err_free_peer_hashtable;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		goto err_free_index_hashtable;

	wg->incoming_handshakes_worker =
		wg_packet_percpu_multicore_worker_alloc(
				wg_packet_handshake_receive_worker, wg);
	if (!wg->incoming_handshakes_worker)
		goto err_free_tstats;

	wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
			WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_receive_wq)
		goto err_free_incoming_handshakes;

	wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
			WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_send_wq)
		goto err_destroy_handshake_receive;

	wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
			WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
	if (!wg->packet_crypt_wq)
		goto err_destroy_handshake_send;

	ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
				   true, MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_destroy_packet_crypt;

	ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
				   true, MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_free_encrypt_queue;

	ret = wg_ratelimiter_init();
	if (ret < 0)
		goto err_free_decrypt_queue;

	ret = register_netdevice(dev);
	if (ret < 0)
		goto err_uninit_ratelimiter;

	list_add(&wg->device_list, &device_list);

	/* We wait until the end to assign priv_destructor, so that
	 * register_netdevice doesn't call it for us if it fails.
	 */
	dev->priv_destructor = wg_destruct;

	pr_debug("%s: Interface created\n", dev->name);
	return ret;

err_uninit_ratelimiter:
	wg_ratelimiter_uninit();
err_free_decrypt_queue:
	wg_packet_queue_free(&wg->decrypt_queue, true);
err_free_encrypt_queue:
	wg_packet_queue_free(&wg->encrypt_queue, true);
err_destroy_packet_crypt:
	destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
	destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
	destroy_workqueue(wg->handshake_receive_wq);
err_free_incoming_handshakes:
	free_percpu(wg->incoming_handshakes_worker);
err_free_tstats:
	free_percpu(dev->tstats);
err_free_index_hashtable:
	kvfree(wg->index_hashtable);
err_free_peer_hashtable:
	kvfree(wg->peer_hashtable);
	return ret;
}

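/* Registers the rtnetlink link kind named after the module ("wireguard"),
 * which is what allows userspace to create interfaces of this type.
 */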
static struct rtnl_link_ops link_ops __read_mostly = {
	.kind			= KBUILD_MODNAME,
	.priv_size		= sizeof(struct wg_device),
	.setup			= wg_setup,
	.newlink		= wg_newlink,
};

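/* A WireGuard device keeps using the network namespace it was created in,
 * which is where its transit UDP socket lives. If the interface is later
 * moved to a different namespace, take a proper reference on the creating
 * one so it cannot go away, and drop that reference again if the interface
 * moves back. NETDEV_REGISTER also fires when a device changes namespaces,
 * which is what makes this notifier sufficient.
 */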
static int wg_netdevice_notification(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
	struct wg_device *wg = netdev_priv(dev);

	ASSERT_RTNL();

	if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
		return 0;

	if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
		put_net(wg->creating_net);
		wg->have_creating_net_ref = false;
	} else if (dev_net(dev) != wg->creating_net &&
		   !wg->have_creating_net_ref) {
		wg->have_creating_net_ref = true;
		get_net(wg->creating_net);
	}
	return 0;
}

static struct notifier_block netdevice_notifier = {
	.notifier_call = wg_netdevice_notification
};

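/* Init for this unit: register the PM notifier (when built with
 * CONFIG_PM_SLEEP), the netdevice notifier, and finally the rtnl link kind,
 * unwinding in reverse order on failure.
 */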
int __init wg_device_init(void)
{
	int ret;

#ifdef CONFIG_PM_SLEEP
	ret = register_pm_notifier(&pm_notifier);
	if (ret)
		return ret;
#endif

	ret = register_netdevice_notifier(&netdevice_notifier);
	if (ret)
		goto error_pm;

	ret = rtnl_link_register(&link_ops);
	if (ret)
		goto error_netdevice;

	return 0;

error_netdevice:
	unregister_netdevice_notifier(&netdevice_notifier);
error_pm:
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&pm_notifier);
#endif
	return ret;
}

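/* Exit for this unit: unregister everything in the reverse order of
 * wg_device_init(), then wait for outstanding RCU callbacks with
 * rcu_barrier() so nothing still references module memory after unload.
 */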
void wg_device_uninit(void)
{
	rtnl_link_unregister(&link_ops);
	unregister_netdevice_notifier(&netdevice_notifier);
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&pm_notifier);
#endif
	rcu_barrier();
}