// SPDX-License-Identifier: GPL-2.0-or-later
/* L2TPv3 IP encapsulation support for IPv6
 *
 * Copyright (c) 2012 Katalix Systems Ltd
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>

#include "l2tp_core.h"

struct l2tp_ip6_sock {
        /* inet_sock has to be the first member of l2tp_ip6_sock */
        struct inet_sock        inet;

        u32                     conn_id;
        u32                     peer_conn_id;

        /* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see
         * inet6_sk_generic
         */
        struct ipv6_pinfo       inet6;
};

static DEFINE_RWLOCK(l2tp_ip6_lock);
static struct hlist_head l2tp_ip6_table;
static struct hlist_head l2tp_ip6_bind_table;

static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
{
        return (struct l2tp_ip6_sock *)sk;
}

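/* Look up a bound socket matching the given local/remote addresses,
 * interface and tunnel id. Called with l2tp_ip6_lock held.
 */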
static struct sock *__l2tp_ip6_bind_lookup(const struct net *net,
                                           const struct in6_addr *laddr,
                                           const struct in6_addr *raddr,
                                           int dif, u32 tunnel_id)
{
        struct sock *sk;

        sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
                const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
                const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
                const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);

                if (!net_eq(sock_net(sk), net))
                        continue;

                if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
                        continue;

                if (sk_laddr && !ipv6_addr_any(sk_laddr) &&
                    !ipv6_addr_any(laddr) && !ipv6_addr_equal(sk_laddr, laddr))
                        continue;

                if (!ipv6_addr_any(sk_raddr) && raddr &&
                    !ipv6_addr_any(raddr) && !ipv6_addr_equal(sk_raddr, raddr))
                        continue;

                if (l2tp->conn_id != tunnel_id)
                        continue;

                goto found;
        }

        sk = NULL;
found:
        return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
static int l2tp_ip6_recv(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);
        struct sock *sk;
        u32 session_id;
        u32 tunnel_id;
        unsigned char *ptr, *optr;
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel = NULL;
        struct ipv6hdr *iph;

        if (!pskb_may_pull(skb, 4))
                goto discard;

        /* Point to L2TP header */
        optr = skb->data;
        ptr = skb->data;
        session_id = ntohl(*((__be32 *)ptr));
        ptr += 4;

        /* RFC3931: L2TP/IP packets have the first 4 bytes containing
         * the session_id. If it is 0, the packet is a L2TP control
         * frame and the session_id value can be discarded.
         */
        if (session_id == 0) {
                __skb_pull(skb, 4);
                goto pass_up;
        }

        /* Ok, this is a data packet. Lookup the session. */
        session = l2tp_session_get(net, session_id);
        if (!session)
                goto discard;

        tunnel = session->tunnel;
        if (!tunnel)
                goto discard_sess;

        if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
                goto discard_sess;

        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
        l2tp_session_dec_refcount(session);

        return 0;

pass_up:
        /* Get the tunnel_id from the L2TP header */
        if (!pskb_may_pull(skb, 12))
                goto discard;

        if ((skb->data[0] & 0xc0) != 0xc0)
                goto discard;

        tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
        iph = ipv6_hdr(skb);

        read_lock_bh(&l2tp_ip6_lock);
        sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
                                    inet6_iif(skb), tunnel_id);
        if (!sk) {
                read_unlock_bh(&l2tp_ip6_lock);
                goto discard;
        }
        sock_hold(sk);
        read_unlock_bh(&l2tp_ip6_lock);

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;

        nf_reset_ct(skb);

        return sk_receive_skb(sk, skb, 1);

discard_sess:
        l2tp_session_dec_refcount(session);
        goto discard;

discard_put:
        sock_put(sk);

discard:
        kfree_skb(skb);
        return 0;
}

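/* Unbound sockets are tracked in l2tp_ip6_table; l2tp_ip6_bind() moves a
 * socket to l2tp_ip6_bind_table once it has a local address.
 */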
static int l2tp_ip6_hash(struct sock *sk)
{
        if (sk_unhashed(sk)) {
                write_lock_bh(&l2tp_ip6_lock);
                sk_add_node(sk, &l2tp_ip6_table);
                write_unlock_bh(&l2tp_ip6_lock);
        }
        return 0;
}

static void l2tp_ip6_unhash(struct sock *sk)
{
        if (sk_unhashed(sk))
                return;
        write_lock_bh(&l2tp_ip6_lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip6_lock);
}

static int l2tp_ip6_open(struct sock *sk)
{
        /* Prevent autobind. We don't have ports. */
        inet_sk(sk)->inet_num = IPPROTO_L2TP;

        l2tp_ip6_hash(sk);
        return 0;
}

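/* Remove the socket from both the hash and bind tables before releasing it. */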
static void l2tp_ip6_close(struct sock *sk, long timeout)
{
        write_lock_bh(&l2tp_ip6_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip6_lock);

        sk_common_release(sk);
}

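/* Flush any pending frames and delete the tunnel using this socket, if any. */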
static void l2tp_ip6_destroy_sock(struct sock *sk)
{
        struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);

        lock_sock(sk);
        ip6_flush_pending_frames(sk);
        release_sock(sk);

        if (tunnel)
                l2tp_tunnel_delete(tunnel);

        inet6_destroy_sock(sk);
}

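/* Bind the socket to a local address and L2TP connection id. The
 * (address, interface, connection id) tuple must not already be in use.
 */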
static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *)uaddr;
        struct net *net = sock_net(sk);
        __be32 v4addr = 0;
        int bound_dev_if;
        int addr_type;
        int err;

        if (addr->l2tp_family != AF_INET6)
                return -EINVAL;
        if (addr_len < sizeof(*addr))
                return -EINVAL;

        addr_type = ipv6_addr_type(&addr->l2tp_addr);

        /* l2tp_ip6 sockets are IPv6 only */
        if (addr_type == IPV6_ADDR_MAPPED)
                return -EADDRNOTAVAIL;

        /* L2TP is point-point, not multicast */
        if (addr_type & IPV6_ADDR_MULTICAST)
                return -EADDRNOTAVAIL;

        lock_sock(sk);

        err = -EINVAL;
        if (!sock_flag(sk, SOCK_ZAPPED))
                goto out_unlock;

        if (sk->sk_state != TCP_CLOSE)
                goto out_unlock;

        bound_dev_if = sk->sk_bound_dev_if;

        /* Check if the address belongs to the host. */
        rcu_read_lock();
        if (addr_type != IPV6_ADDR_ANY) {
                struct net_device *dev = NULL;

                if (addr_type & IPV6_ADDR_LINKLOCAL) {
                        if (addr->l2tp_scope_id)
                                bound_dev_if = addr->l2tp_scope_id;

                        /* Binding to link-local address requires an
                         * interface.
                         */
                        if (!bound_dev_if)
                                goto out_unlock_rcu;

                        err = -ENODEV;
                        dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
                        if (!dev)
                                goto out_unlock_rcu;
                }

                /* ipv4 addr of the socket is invalid. Only the
                 * unspecified and mapped address have a v4 equivalent.
                 */
                v4addr = LOOPBACK4_IPV6;
                err = -EADDRNOTAVAIL;
                if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
                        goto out_unlock_rcu;
        }
        rcu_read_unlock();

        write_lock_bh(&l2tp_ip6_lock);
        if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
                                   addr->l2tp_conn_id)) {
                write_unlock_bh(&l2tp_ip6_lock);
                err = -EADDRINUSE;
                goto out_unlock;
        }

        inet->inet_saddr = v4addr;
        inet->inet_rcv_saddr = v4addr;
        sk->sk_bound_dev_if = bound_dev_if;
        sk->sk_v6_rcv_saddr = addr->l2tp_addr;
        np->saddr = addr->l2tp_addr;

        l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;

        sk_add_bind_node(sk, &l2tp_ip6_bind_table);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip6_lock);

        sock_reset_flag(sk, SOCK_ZAPPED);
        release_sock(sk);
        return 0;

out_unlock_rcu:
        rcu_read_unlock();
out_unlock:
        release_sock(sk);

        return err;
}

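/* Connect the socket to a peer address and connection id. The socket must
 * already be bound since there is no autobind for L2TP/IPv6 sockets.
 */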
static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
                            int addr_len)
{
        struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
        struct in6_addr *daddr;
        int addr_type;
        int rc;

        if (addr_len < sizeof(*lsa))
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EINVAL;

        addr_type = ipv6_addr_type(&usin->sin6_addr);
        if (addr_type & IPV6_ADDR_MULTICAST)
                return -EINVAL;

        if (addr_type & IPV6_ADDR_MAPPED) {
                daddr = &usin->sin6_addr;
                if (ipv4_is_multicast(daddr->s6_addr32[3]))
                        return -EINVAL;
        }

        lock_sock(sk);

        /* Must bind first - autobinding does not work */
        if (sock_flag(sk, SOCK_ZAPPED)) {
                rc = -EINVAL;
                goto out_sk;
        }

        rc = __ip6_datagram_connect(sk, uaddr, addr_len);
        if (rc < 0)
                goto out_sk;

        l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

        write_lock_bh(&l2tp_ip6_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_add_bind_node(sk, &l2tp_ip6_bind_table);
        write_unlock_bh(&l2tp_ip6_lock);

out_sk:
        release_sock(sk);

        return rc;
}

static int l2tp_ip6_disconnect(struct sock *sk, int flags)
{
        if (sock_flag(sk, SOCK_ZAPPED))
                return 0;

        return __udp_disconnect(sk, flags);
}

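/* Report the local or peer address and connection id of the socket. */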
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
                            int peer)
{
        struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
        struct sock *sk = sock->sk;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);

        lsa->l2tp_family = AF_INET6;
        lsa->l2tp_flowinfo = 0;
        lsa->l2tp_scope_id = 0;
        lsa->l2tp_unused = 0;
        if (peer) {
                if (!lsk->peer_conn_id)
                        return -ENOTCONN;
                lsa->l2tp_conn_id = lsk->peer_conn_id;
                lsa->l2tp_addr = sk->sk_v6_daddr;
                if (np->sndflow)
                        lsa->l2tp_flowinfo = np->flow_label;
        } else {
                if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
                        lsa->l2tp_addr = np->saddr;
                else
                        lsa->l2tp_addr = sk->sk_v6_rcv_saddr;

                lsa->l2tp_conn_id = lsk->conn_id;
        }
        if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
                lsa->l2tp_scope_id = sk->sk_bound_dev_if;
        return sizeof(*lsa);
}

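/* backlog_rcv handler: queue control frames passed up by l2tp_ip6_recv()
 * via sk_receive_skb().
 */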
static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
        int rc;

        /* Charge it to the socket, dropping if the queue is full. */
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0)
                goto drop;

        return 0;

drop:
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
        return -1;
}

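/* Write the zero session-id at the transport header, then transmit the
 * queued frame.
 */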
static int l2tp_ip6_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb;
        __be32 *transhdr = NULL;
        int err = 0;

        skb = skb_peek(&sk->sk_write_queue);
        if (!skb)
                goto out;

        transhdr = (__be32 *)skb_transport_header(skb);
        *transhdr = 0;

        err = ip6_push_pending_frames(sk);

out:
        return err;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        struct ipv6_txoptions opt_space;
        DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
        struct in6_addr *daddr, *final_p, final;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_txoptions *opt_to_free = NULL;
        struct ipv6_txoptions *opt = NULL;
        struct ip6_flowlabel *flowlabel = NULL;
        struct dst_entry *dst = NULL;
        struct flowi6 fl6;
        struct ipcm6_cookie ipc6;
        int addr_len = msg->msg_namelen;
        int transhdrlen = 4; /* zero session-id */
        int ulen = len + transhdrlen;
        int err;

        /* Rough check on arithmetic overflow,
         * better check is made in ip6_append_data().
         */
        if (len > INT_MAX)
                return -EMSGSIZE;

        /* Mirror BSD error message compatibility */
        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        /* Get and verify the address */
        memset(&fl6, 0, sizeof(fl6));

        fl6.flowi6_mark = sk->sk_mark;
        fl6.flowi6_uid = sk->sk_uid;

        ipcm6_init(&ipc6);

        if (lsa) {
                if (addr_len < SIN6_LEN_RFC2133)
                        return -EINVAL;

                if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6)
                        return -EAFNOSUPPORT;

                daddr = &lsa->l2tp_addr;
                if (np->sndflow) {
                        fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
                        if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
                                flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                                if (IS_ERR(flowlabel))
                                        return -EINVAL;
                        }
                }

                /* Otherwise it will be difficult to maintain
                 * sk->sk_dst_cache.
                 */
                if (sk->sk_state == TCP_ESTABLISHED &&
                    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
                        daddr = &sk->sk_v6_daddr;

                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    lsa->l2tp_scope_id &&
                    ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
                        fl6.flowi6_oif = lsa->l2tp_scope_id;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;

                daddr = &sk->sk_v6_daddr;
                fl6.flowlabel = np->flow_label;
        }

        if (fl6.flowi6_oif == 0)
                fl6.flowi6_oif = sk->sk_bound_dev_if;

        if (msg->msg_controllen) {
                opt = &opt_space;
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
                ipc6.opt = opt;

                err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
                }
                if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (IS_ERR(flowlabel))
                                return -EINVAL;
                }
                if (!(opt->opt_nflen | opt->opt_flen))
                        opt = NULL;
        }

        if (!opt) {
                opt = txopt_get(np);
                opt_to_free = opt;
        }
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        opt = ipv6_fixup_options(&opt_space, opt);
        ipc6.opt = opt;

        fl6.flowi6_proto = sk->sk_protocol;
        if (!ipv6_addr_any(daddr))
                fl6.daddr = *daddr;
        else
                fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
        if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
                fl6.saddr = np->saddr;

        final_p = fl6_update_dst(&fl6, opt, &final);

        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
                fl6.flowi6_oif = np->mcast_oif;
        else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;

        security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

        if (ipc6.tclass < 0)
                ipc6.tclass = np->tclass;

        fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

        dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto out;
        }

        if (ipc6.hlimit < 0)
                ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

        if (ipc6.dontfrag < 0)
                ipc6.dontfrag = np->dontfrag;

        if (msg->msg_flags & MSG_CONFIRM)
                goto do_confirm;

back_from_confirm:
        lock_sock(sk);
        err = ip6_append_data(sk, ip_generic_getfrag, msg,
                              ulen, transhdrlen, &ipc6,
                              &fl6, (struct rt6_info *)dst,
                              msg->msg_flags);
        if (err)
                ip6_flush_pending_frames(sk);
        else if (!(msg->msg_flags & MSG_MORE))
                err = l2tp_ip6_push_pending_frames(sk);
        release_sock(sk);
done:
        dst_release(dst);
out:
        fl6_sock_release(flowlabel);
        txopt_put(opt_to_free);

        return err < 0 ? err : len;

do_confirm:
        if (msg->msg_flags & MSG_PROBE)
                dst_confirm_neigh(dst, &fl6.daddr);
        if (!(msg->msg_flags & MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
        goto done;
}

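/* Userspace will call recvmsg() on the tunnel socket to receive L2TP
 * control frames.
 */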
static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                            int noblock, int flags, int *addr_len)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
        size_t copied = 0;
        int err = -EOPNOTSUPP;
        struct sk_buff *skb;

        if (flags & MSG_OOB)
                goto out;

        if (flags & MSG_ERRQUEUE)
                return ipv6_recv_error(sk, msg, len, addr_len);

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                goto out;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                goto done;

        sock_recv_timestamp(msg, sk, skb);

        /* Copy the address. */
        if (lsa) {
                lsa->l2tp_family = AF_INET6;
                lsa->l2tp_unused = 0;
                lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
                lsa->l2tp_flowinfo = 0;
                lsa->l2tp_scope_id = 0;
                lsa->l2tp_conn_id = 0;
                if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
                        lsa->l2tp_scope_id = inet6_iif(skb);
                *addr_len = sizeof(*lsa);
        }

        if (np->rxopt.all)
                ip6_datagram_recv_ctl(sk, msg, skb);

        if (flags & MSG_TRUNC)
                copied = skb->len;
done:
        skb_free_datagram(sk, skb);
out:
        return err ? err : copied;
}

static struct proto l2tp_ip6_prot = {
        .name           = "L2TP/IPv6",
        .owner          = THIS_MODULE,
        .init           = l2tp_ip6_open,
        .close          = l2tp_ip6_close,
        .bind           = l2tp_ip6_bind,
        .connect        = l2tp_ip6_connect,
        .disconnect     = l2tp_ip6_disconnect,
        .ioctl          = l2tp_ioctl,
        .destroy        = l2tp_ip6_destroy_sock,
        .setsockopt     = ipv6_setsockopt,
        .getsockopt     = ipv6_getsockopt,
        .sendmsg        = l2tp_ip6_sendmsg,
        .recvmsg        = l2tp_ip6_recvmsg,
        .backlog_rcv    = l2tp_ip6_backlog_recv,
        .hash           = l2tp_ip6_hash,
        .unhash         = l2tp_ip6_unhash,
        .obj_size       = sizeof(struct l2tp_ip6_sock),
};

static const struct proto_ops l2tp_ip6_ops = {
        .family         = PF_INET6,
        .owner          = THIS_MODULE,
        .release        = inet6_release,
        .bind           = inet6_bind,
        .connect        = inet_dgram_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = l2tp_ip6_getname,
        .poll           = datagram_poll,
        .ioctl          = inet6_ioctl,
        .gettstamp      = sock_gettstamp,
        .listen         = sock_no_listen,
        .shutdown       = inet_shutdown,
        .setsockopt     = sock_common_setsockopt,
        .getsockopt     = sock_common_getsockopt,
        .sendmsg        = inet_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = inet6_compat_ioctl,
#endif
};

static struct inet_protosw l2tp_ip6_protosw = {
        .type           = SOCK_DGRAM,
        .protocol       = IPPROTO_L2TP,
        .prot           = &l2tp_ip6_prot,
        .ops            = &l2tp_ip6_ops,
};

static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
        .handler        = l2tp_ip6_recv,
};

static int __init l2tp_ip6_init(void)
{
        int err;

        pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");

        err = proto_register(&l2tp_ip6_prot, 1);
        if (err != 0)
                goto out;

        err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
        if (err)
                goto out1;

        inet6_register_protosw(&l2tp_ip6_protosw);
        return 0;

out1:
        proto_unregister(&l2tp_ip6_prot);
out:
        return err;
}

static void __exit l2tp_ip6_exit(void)
{
        inet6_unregister_protosw(&l2tp_ip6_protosw);
        inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
        proto_unregister(&l2tp_ip6_prot);
}

module_init(l2tp_ip6_init);
module_exit(l2tp_ip6_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
MODULE_VERSION("1.0");

/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
MODULE_ALIAS_NET_PF_PROTO(PF_INET6, IPPROTO_L2TP);