/*
 * SR-IPv6 implementation
 *
 * Authors:
 * David Lebrun <david.lebrun@uclouvain.be>
 * eBPF support: Mathieu Xhonneux <m.xhonneux@gmail.com>
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
#include <net/ip6_fib.h>
#include <net/route.h>
#include <net/seg6.h>
#include <linux/seg6.h>
#include <linux/seg6_local.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/dst_cache.h>
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif
#include <net/seg6_local.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

struct seg6_local_lwt;

struct seg6_action_desc {
	int action;
	unsigned long attrs;
	int (*input)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
	int static_headroom;
};

struct bpf_lwt_prog {
	struct bpf_prog *prog;
	char *name;
};

struct seg6_local_lwt {
	int action;
	struct ipv6_sr_hdr *srh;
	int table;
	struct in_addr nh4;
	struct in6_addr nh6;
	int iif;
	int oif;
	struct bpf_lwt_prog bpf;

	int headroom;
	struct seg6_action_desc *desc;
};

static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct seg6_local_lwt *)lwt->data;
}

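/* Locate the IPv6 routing header in the packet, make sure it is entirely
 * available in the linear part of the skb, and check that it is a
 * well-formed SRH. Returns NULL if no valid SRH is found.
 */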
static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
{
	struct ipv6_sr_hdr *srh;
	int len, srhoff = 0;

	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
		return NULL;

	if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
		return NULL;

	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);

	len = (srh->hdrlen + 1) << 3;

	if (!pskb_may_pull(skb, srhoff + len))
		return NULL;

	if (!seg6_validate_srh(srh, len))
		return NULL;

	return srh;
}

static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb)
{
	struct ipv6_sr_hdr *srh;

	srh = get_srh(skb);
	if (!srh)
		return NULL;

	if (srh->segments_left == 0)
		return NULL;

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (!seg6_hmac_validate_skb(skb))
		return NULL;
#endif

	return srh;
}

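/* Decapsulate the outer IPv6 header (and SRH, if present) down to the inner
 * header of type @proto. Any SRH must already be fully consumed
 * (segments_left == 0) and must pass HMAC validation when enabled. On
 * success, the skb's network and transport headers point to the inner packet.
 */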
static bool decap_and_validate(struct sk_buff *skb, int proto)
{
	struct ipv6_sr_hdr *srh;
	unsigned int off = 0;

	srh = get_srh(skb);
	if (srh && srh->segments_left > 0)
		return false;

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (srh && !seg6_hmac_validate_skb(skb))
		return false;
#endif

	if (ipv6_find_hdr(skb, &off, proto, NULL, NULL) < 0)
		return false;

	if (!pskb_pull(skb, off))
		return false;

	skb_postpull_rcsum(skb, skb_network_header(skb), off);

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->encapsulation = 0;

	return true;
}

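/* Make the next segment in the SRH the active one: decrement segments_left
 * and copy the corresponding segment into the IPv6 destination address.
 */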
static void advance_nextseg(struct ipv6_sr_hdr *srh, struct in6_addr *daddr)
{
	struct in6_addr *addr;

	srh->segments_left--;
	addr = srh->segments + srh->segments_left;
	*daddr = *addr;
}

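/* Route the packet towards @nhaddr (or towards the packet's IPv6 destination
 * address if @nhaddr is NULL), optionally restricting the lookup to table
 * @tbl_id. Routes through a loopback device are rejected; if no usable route
 * is found, the blackhole entry is attached so that dst_input() drops the
 * packet.
 */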
int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
			u32 tbl_id)
{
	struct net *net = dev_net(skb->dev);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;
	struct flowi6 fl6;

	fl6.flowi6_iif = skb->dev->ifindex;
	fl6.daddr = nhaddr ? *nhaddr : hdr->daddr;
	fl6.saddr = hdr->saddr;
	fl6.flowlabel = ip6_flowinfo(hdr);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = hdr->nexthdr;

	if (nhaddr)
		fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;

	if (!tbl_id) {
		dst = ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags);
	} else {
		struct fib6_table *table;

		table = fib6_get_table(net, tbl_id);
		if (!table)
			goto out;

		rt = ip6_pol_route(net, table, 0, &fl6, skb, flags);
		dst = &rt->dst;
	}

	if (dst && dst->dev->flags & IFF_LOOPBACK && !dst->error) {
		dst_release(dst);
		dst = NULL;
	}

out:
	if (!dst) {
		rt = net->ipv6.ip6_blk_hole_entry;
		dst = &rt->dst;
		dst_hold(dst);
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);
	return dst->error;
}

/* regular endpoint function */
static int input_action_end(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;

	srh = get_and_validate_srh(skb);
	if (!srh)
		goto drop;

	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);

	seg6_lookup_nexthop(skb, NULL, 0);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

/* regular endpoint, and forward to specified nexthop */
static int input_action_end_x(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;

	srh = get_and_validate_srh(skb);
	if (!srh)
		goto drop;

	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);

	seg6_lookup_nexthop(skb, &slwt->nh6, 0);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

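/* regular endpoint, with the nexthop looked up in the specified routing table */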
static int input_action_end_t(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;

	srh = get_and_validate_srh(skb);
	if (!srh)
		goto drop;

	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);

	seg6_lookup_nexthop(skb, NULL, slwt->table);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

/* decapsulate and forward inner L2 frame on specified interface */
static int input_action_end_dx2(struct sk_buff *skb,
				struct seg6_local_lwt *slwt)
{
	struct net *net = dev_net(skb->dev);
	struct net_device *odev;
	struct ethhdr *eth;

	if (!decap_and_validate(skb, NEXTHDR_NONE))
		goto drop;

	if (!pskb_may_pull(skb, ETH_HLEN))
		goto drop;

	skb_reset_mac_header(skb);
	eth = (struct ethhdr *)skb->data;

	/* To determine the frame's protocol, we assume it is 802.3. This
	 * avoids a call to eth_type_trans(), which is not really relevant
	 * for our use case.
	 */
	if (!eth_proto_is_802_3(eth->h_proto))
		goto drop;

	odev = dev_get_by_index_rcu(net, slwt->oif);
	if (!odev)
		goto drop;

	/* As we accept Ethernet frames, make sure the egress device is of
	 * the correct type.
	 */
	if (odev->type != ARPHRD_ETHER)
		goto drop;

	if (!(odev->flags & IFF_UP) || !netif_carrier_ok(odev))
		goto drop;

	skb_orphan(skb);

	if (skb_warn_if_lro(skb))
		goto drop;

	skb_forward_csum(skb);

	if (skb->len - ETH_HLEN > odev->mtu)
		goto drop;

	skb->dev = odev;
	skb->protocol = eth->h_proto;

	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

/* decapsulate and forward to specified nexthop */
static int input_action_end_dx6(struct sk_buff *skb,
				struct seg6_local_lwt *slwt)
{
	struct in6_addr *nhaddr = NULL;

	/* this function accepts IPv6 encapsulated packets, with either
	 * an SRH with SL=0, or no SRH.
	 */

	if (!decap_and_validate(skb, IPPROTO_IPV6))
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto drop;

	/* The inner packet is not associated with any local interface,
	 * so we do not call netif_rx().
	 *
	 * If slwt->nh6 is set to ::, then look up the nexthop for the
	 * inner packet's DA. Otherwise, use the specified nexthop.
	 */

	if (!ipv6_addr_any(&slwt->nh6))
		nhaddr = &slwt->nh6;

	seg6_lookup_nexthop(skb, nhaddr, 0);

	return dst_input(skb);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

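/* decapsulate and forward inner IPv4 packet to specified nexthop */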
static int input_action_end_dx4(struct sk_buff *skb,
				struct seg6_local_lwt *slwt)
{
	struct iphdr *iph;
	__be32 nhaddr;
	int err;

	if (!decap_and_validate(skb, IPPROTO_IPIP))
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto drop;

	skb->protocol = htons(ETH_P_IP);

	iph = ip_hdr(skb);

	nhaddr = slwt->nh4.s_addr ?: iph->daddr;

	skb_dst_drop(skb);

	err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
	if (err)
		goto drop;

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

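/* decapsulate and look up the inner IPv6 packet in the specified routing table */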
static int input_action_end_dt6(struct sk_buff *skb,
				struct seg6_local_lwt *slwt)
{
	if (!decap_and_validate(skb, IPPROTO_IPV6))
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto drop;

	seg6_lookup_nexthop(skb, NULL, slwt->table);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

/* push an SRH on top of the current one */
static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;
	int err = -EINVAL;

	srh = get_and_validate_srh(skb);
	if (!srh)
		goto drop;

	err = seg6_do_srh_inline(skb, slwt->srh);
	if (err)
		goto drop;

	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	seg6_lookup_nexthop(skb, NULL, 0);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return err;
}

/* encapsulate within an outer IPv6 header and a specified SRH */
static int input_action_end_b6_encap(struct sk_buff *skb,
				     struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;
	int err = -EINVAL;

	srh = get_and_validate_srh(skb);
	if (!srh)
		goto drop;

	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);

	skb_reset_inner_headers(skb);
	skb->encapsulation = 1;

	err = seg6_do_srh_encap(skb, slwt->srh, IPPROTO_IPV6);
	if (err)
		goto drop;

	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	seg6_lookup_nexthop(skb, NULL, 0);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return err;
}

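/* Per-CPU SRH state shared with the bpf_lwt_seg6_* helpers: it tracks the
 * current SRH pointer, its length and a validity flag while an End.BPF
 * program may be rewriting the header.
 */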
DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);

bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
{
	struct seg6_bpf_srh_state *srh_state =
		this_cpu_ptr(&seg6_bpf_srh_states);
	struct ipv6_sr_hdr *srh = srh_state->srh;

	if (unlikely(srh == NULL))
		return false;

	if (unlikely(!srh_state->valid)) {
		if ((srh_state->hdrlen & 7) != 0)
			return false;

		srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
		if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3))
			return false;

		srh_state->valid = true;
	}

	return true;
}

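/* regular endpoint, then run the attached BPF program on the packet; the
 * program may modify the SRH (re-validated afterwards), drop the packet,
 * or redirect it
 */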
static int input_action_end_bpf(struct sk_buff *skb,
				struct seg6_local_lwt *slwt)
{
	struct seg6_bpf_srh_state *srh_state =
		this_cpu_ptr(&seg6_bpf_srh_states);
	struct ipv6_sr_hdr *srh;
	int ret;

	srh = get_and_validate_srh(skb);
	if (!srh) {
		kfree_skb(skb);
		return -EINVAL;
	}
	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);

	/* preempt_disable is needed to protect the per-CPU buffer srh_state,
	 * which is also accessed by the bpf_lwt_seg6_* helpers
	 */
	preempt_disable();
	srh_state->srh = srh;
	srh_state->hdrlen = srh->hdrlen << 3;
	srh_state->valid = true;

	rcu_read_lock();
	bpf_compute_data_pointers(skb);
	ret = bpf_prog_run_save_cb(slwt->bpf.prog, skb);
	rcu_read_unlock();

	switch (ret) {
	case BPF_OK:
	case BPF_REDIRECT:
		break;
	case BPF_DROP:
		goto drop;
	default:
		pr_warn_once("bpf-seg6local: Illegal return value %u\n", ret);
		goto drop;
	}

	if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
		goto drop;

	preempt_enable();
	if (ret != BPF_REDIRECT)
		seg6_lookup_nexthop(skb, NULL, 0);

	return dst_input(skb);

drop:
	preempt_enable();
	kfree_skb(skb);
	return -EINVAL;
}

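/* Map each SEG6_LOCAL_ACTION_* value to its handler and to the netlink
 * attributes it requires. As an illustration (iproute2 syntax, assuming a
 * reasonably recent ip(8) with seg6local support), an End.X route backed by
 * this table could be configured with something like:
 *
 *   ip -6 route add fc00::1/128 encap seg6local action End.X nh6 2001:db8::1 dev eth0
 */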
static struct seg6_action_desc seg6_action_table[] = {
	{
		.action = SEG6_LOCAL_ACTION_END,
		.attrs = 0,
		.input = input_action_end,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_X,
		.attrs = (1 << SEG6_LOCAL_NH6),
		.input = input_action_end_x,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_T,
		.attrs = (1 << SEG6_LOCAL_TABLE),
		.input = input_action_end_t,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_DX2,
		.attrs = (1 << SEG6_LOCAL_OIF),
		.input = input_action_end_dx2,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_DX6,
		.attrs = (1 << SEG6_LOCAL_NH6),
		.input = input_action_end_dx6,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_DX4,
		.attrs = (1 << SEG6_LOCAL_NH4),
		.input = input_action_end_dx4,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_DT6,
		.attrs = (1 << SEG6_LOCAL_TABLE),
		.input = input_action_end_dt6,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_B6,
		.attrs = (1 << SEG6_LOCAL_SRH),
		.input = input_action_end_b6,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_B6_ENCAP,
		.attrs = (1 << SEG6_LOCAL_SRH),
		.input = input_action_end_b6_encap,
		.static_headroom = sizeof(struct ipv6hdr),
	},
	{
		.action = SEG6_LOCAL_ACTION_END_BPF,
		.attrs = (1 << SEG6_LOCAL_BPF),
		.input = input_action_end_bpf,
	},
};

static struct seg6_action_desc *__get_action_desc(int action)
{
	struct seg6_action_desc *desc;
	int i, count;

	count = ARRAY_SIZE(seg6_action_table);
	for (i = 0; i < count; i++) {
		desc = &seg6_action_table[i];
		if (desc->action == action)
			return desc;
	}

	return NULL;
}

static int seg6_local_input(struct sk_buff *skb)
{
	struct dst_entry *orig_dst = skb_dst(skb);
	struct seg6_action_desc *desc;
	struct seg6_local_lwt *slwt;

	if (skb->protocol != htons(ETH_P_IPV6)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	slwt = seg6_local_lwtunnel(orig_dst->lwtstate);
	desc = slwt->desc;

	return desc->input(skb, slwt);
}

static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = {
	[SEG6_LOCAL_ACTION] = { .type = NLA_U32 },
	[SEG6_LOCAL_SRH] = { .type = NLA_BINARY },
	[SEG6_LOCAL_TABLE] = { .type = NLA_U32 },
	[SEG6_LOCAL_NH4] = { .type = NLA_BINARY,
			     .len = sizeof(struct in_addr) },
	[SEG6_LOCAL_NH6] = { .type = NLA_BINARY,
			     .len = sizeof(struct in6_addr) },
	[SEG6_LOCAL_IIF] = { .type = NLA_U32 },
	[SEG6_LOCAL_OIF] = { .type = NLA_U32 },
	[SEG6_LOCAL_BPF] = { .type = NLA_NESTED },
};

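/* Each SEG6_LOCAL_* attribute comes with a parse/put/cmp triplet (gathered
 * in seg6_action_params below): parse fills the seg6_local_lwt from netlink,
 * put dumps it back to user space, and cmp compares two lwt states.
 */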
static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;
	int len;

	srh = nla_data(attrs[SEG6_LOCAL_SRH]);
	len = nla_len(attrs[SEG6_LOCAL_SRH]);

	/* SRH must contain at least one segment */
	if (len < sizeof(*srh) + sizeof(struct in6_addr))
		return -EINVAL;

	if (!seg6_validate_srh(srh, len))
		return -EINVAL;

	slwt->srh = kmemdup(srh, len, GFP_KERNEL);
	if (!slwt->srh)
		return -ENOMEM;

	slwt->headroom += len;

	return 0;
}

static int put_nla_srh(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;
	struct nlattr *nla;
	int len;

	srh = slwt->srh;
	len = (srh->hdrlen + 1) << 3;

	nla = nla_reserve(skb, SEG6_LOCAL_SRH, len);
	if (!nla)
		return -EMSGSIZE;

	memcpy(nla_data(nla), srh, len);

	return 0;
}

static int cmp_nla_srh(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	int len = (a->srh->hdrlen + 1) << 3;

	if (len != ((b->srh->hdrlen + 1) << 3))
		return 1;

	return memcmp(a->srh, b->srh, len);
}

static int parse_nla_table(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	slwt->table = nla_get_u32(attrs[SEG6_LOCAL_TABLE]);

	return 0;
}

static int put_nla_table(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	if (nla_put_u32(skb, SEG6_LOCAL_TABLE, slwt->table))
		return -EMSGSIZE;

	return 0;
}

static int cmp_nla_table(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	if (a->table != b->table)
		return 1;

	return 0;
}

static int parse_nla_nh4(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	memcpy(&slwt->nh4, nla_data(attrs[SEG6_LOCAL_NH4]),
	       sizeof(struct in_addr));

	return 0;
}

static int put_nla_nh4(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct nlattr *nla;

	nla = nla_reserve(skb, SEG6_LOCAL_NH4, sizeof(struct in_addr));
	if (!nla)
		return -EMSGSIZE;

	memcpy(nla_data(nla), &slwt->nh4, sizeof(struct in_addr));

	return 0;
}

static int cmp_nla_nh4(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	return memcmp(&a->nh4, &b->nh4, sizeof(struct in_addr));
}

static int parse_nla_nh6(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	memcpy(&slwt->nh6, nla_data(attrs[SEG6_LOCAL_NH6]),
	       sizeof(struct in6_addr));

	return 0;
}

static int put_nla_nh6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct nlattr *nla;

	nla = nla_reserve(skb, SEG6_LOCAL_NH6, sizeof(struct in6_addr));
	if (!nla)
		return -EMSGSIZE;

	memcpy(nla_data(nla), &slwt->nh6, sizeof(struct in6_addr));

	return 0;
}

static int cmp_nla_nh6(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	return memcmp(&a->nh6, &b->nh6, sizeof(struct in6_addr));
}

static int parse_nla_iif(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	slwt->iif = nla_get_u32(attrs[SEG6_LOCAL_IIF]);

	return 0;
}

static int put_nla_iif(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	if (nla_put_u32(skb, SEG6_LOCAL_IIF, slwt->iif))
		return -EMSGSIZE;

	return 0;
}

static int cmp_nla_iif(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	if (a->iif != b->iif)
		return 1;

	return 0;
}

static int parse_nla_oif(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	slwt->oif = nla_get_u32(attrs[SEG6_LOCAL_OIF]);

	return 0;
}

static int put_nla_oif(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	if (nla_put_u32(skb, SEG6_LOCAL_OIF, slwt->oif))
		return -EMSGSIZE;

	return 0;
}

static int cmp_nla_oif(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	if (a->oif != b->oif)
		return 1;

	return 0;
}

#define MAX_PROG_NAME 256
static const struct nla_policy bpf_prog_policy[SEG6_LOCAL_BPF_PROG_MAX + 1] = {
	[SEG6_LOCAL_BPF_PROG] = { .type = NLA_U32, },
	[SEG6_LOCAL_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
				       .len = MAX_PROG_NAME },
};

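/* The SEG6_LOCAL_BPF attribute is a nested attribute carrying the BPF
 * program fd (SEG6_LOCAL_BPF_PROG) and a user-visible name
 * (SEG6_LOCAL_BPF_PROG_NAME); the program must be of type
 * BPF_PROG_TYPE_LWT_SEG6LOCAL.
 */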
static int parse_nla_bpf(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	struct nlattr *tb[SEG6_LOCAL_BPF_PROG_MAX + 1];
	struct bpf_prog *p;
	int ret;
	u32 fd;

	ret = nla_parse_nested_deprecated(tb, SEG6_LOCAL_BPF_PROG_MAX,
					  attrs[SEG6_LOCAL_BPF],
					  bpf_prog_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[SEG6_LOCAL_BPF_PROG] || !tb[SEG6_LOCAL_BPF_PROG_NAME])
		return -EINVAL;

	slwt->bpf.name = nla_memdup(tb[SEG6_LOCAL_BPF_PROG_NAME], GFP_KERNEL);
	if (!slwt->bpf.name)
		return -ENOMEM;

	fd = nla_get_u32(tb[SEG6_LOCAL_BPF_PROG]);
	p = bpf_prog_get_type(fd, BPF_PROG_TYPE_LWT_SEG6LOCAL);
	if (IS_ERR(p)) {
		kfree(slwt->bpf.name);
		return PTR_ERR(p);
	}

	slwt->bpf.prog = p;
	return 0;
}

static int put_nla_bpf(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct nlattr *nest;

	if (!slwt->bpf.prog)
		return 0;

	nest = nla_nest_start_noflag(skb, SEG6_LOCAL_BPF);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, SEG6_LOCAL_BPF_PROG, slwt->bpf.prog->aux->id))
		return -EMSGSIZE;

	if (slwt->bpf.name &&
	    nla_put_string(skb, SEG6_LOCAL_BPF_PROG_NAME, slwt->bpf.name))
		return -EMSGSIZE;

	return nla_nest_end(skb, nest);
}

static int cmp_nla_bpf(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	if (!a->bpf.name && !b->bpf.name)
		return 0;

	if (!a->bpf.name || !b->bpf.name)
		return 1;

	return strcmp(a->bpf.name, b->bpf.name);
}

struct seg6_action_param {
	int (*parse)(struct nlattr **attrs, struct seg6_local_lwt *slwt);
	int (*put)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
	int (*cmp)(struct seg6_local_lwt *a, struct seg6_local_lwt *b);
};

static struct seg6_action_param seg6_action_params[SEG6_LOCAL_MAX + 1] = {
	[SEG6_LOCAL_SRH] = { .parse = parse_nla_srh,
			     .put = put_nla_srh,
			     .cmp = cmp_nla_srh },

	[SEG6_LOCAL_TABLE] = { .parse = parse_nla_table,
			       .put = put_nla_table,
			       .cmp = cmp_nla_table },

	[SEG6_LOCAL_NH4] = { .parse = parse_nla_nh4,
			     .put = put_nla_nh4,
			     .cmp = cmp_nla_nh4 },

	[SEG6_LOCAL_NH6] = { .parse = parse_nla_nh6,
			     .put = put_nla_nh6,
			     .cmp = cmp_nla_nh6 },

	[SEG6_LOCAL_IIF] = { .parse = parse_nla_iif,
			     .put = put_nla_iif,
			     .cmp = cmp_nla_iif },

	[SEG6_LOCAL_OIF] = { .parse = parse_nla_oif,
			     .put = put_nla_oif,
			     .cmp = cmp_nla_oif },

	[SEG6_LOCAL_BPF] = { .parse = parse_nla_bpf,
			     .put = put_nla_bpf,
			     .cmp = cmp_nla_bpf },
};

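/* Resolve the action descriptor and parse every attribute it requires;
 * a missing mandatory attribute causes the configuration to be rejected.
 */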
static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	struct seg6_action_param *param;
	struct seg6_action_desc *desc;
	int i, err;

	desc = __get_action_desc(slwt->action);
	if (!desc)
		return -EINVAL;

	if (!desc->input)
		return -EOPNOTSUPP;

	slwt->desc = desc;
	slwt->headroom += desc->static_headroom;

	for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
		if (desc->attrs & (1 << i)) {
			if (!attrs[i])
				return -EINVAL;

			param = &seg6_action_params[i];

			err = param->parse(attrs, slwt);
			if (err < 0)
				return err;
		}
	}

	return 0;
}

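/* Called by the lwtunnel infrastructure when a seg6local route is installed:
 * validates the netlink attributes and builds the per-route seg6_local_lwt
 * state.
 */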
static int seg6_local_build_state(struct nlattr *nla, unsigned int family,
				  const void *cfg, struct lwtunnel_state **ts,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[SEG6_LOCAL_MAX + 1];
	struct lwtunnel_state *newts;
	struct seg6_local_lwt *slwt;
	int err;

	if (family != AF_INET6)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, SEG6_LOCAL_MAX, nla,
					  seg6_local_policy, extack);

	if (err < 0)
		return err;

	if (!tb[SEG6_LOCAL_ACTION])
		return -EINVAL;

	newts = lwtunnel_state_alloc(sizeof(*slwt));
	if (!newts)
		return -ENOMEM;

	slwt = seg6_local_lwtunnel(newts);
	slwt->action = nla_get_u32(tb[SEG6_LOCAL_ACTION]);

	err = parse_nla_action(tb, slwt);
	if (err < 0)
		goto out_free;

	newts->type = LWTUNNEL_ENCAP_SEG6_LOCAL;
	newts->flags = LWTUNNEL_STATE_INPUT_REDIRECT;
	newts->headroom = slwt->headroom;

	*ts = newts;

	return 0;

out_free:
	kfree(slwt->srh);
	kfree(newts);
	return err;
}

static void seg6_local_destroy_state(struct lwtunnel_state *lwt)
{
	struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);

	kfree(slwt->srh);

	if (slwt->desc->attrs & (1 << SEG6_LOCAL_BPF)) {
		kfree(slwt->bpf.name);
		bpf_prog_put(slwt->bpf.prog);
	}
}

static int seg6_local_fill_encap(struct sk_buff *skb,
				 struct lwtunnel_state *lwt)
{
	struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
	struct seg6_action_param *param;
	int i, err;

	if (nla_put_u32(skb, SEG6_LOCAL_ACTION, slwt->action))
		return -EMSGSIZE;

	for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
		if (slwt->desc->attrs & (1 << i)) {
			param = &seg6_action_params[i];
			err = param->put(skb, slwt);
			if (err < 0)
				return err;
		}
	}

	return 0;
}

static int seg6_local_get_encap_size(struct lwtunnel_state *lwt)
{
	struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
	unsigned long attrs;
	int nlsize;

	nlsize = nla_total_size(4); /* action */

	attrs = slwt->desc->attrs;

	if (attrs & (1 << SEG6_LOCAL_SRH))
		nlsize += nla_total_size((slwt->srh->hdrlen + 1) << 3);

	if (attrs & (1 << SEG6_LOCAL_TABLE))
		nlsize += nla_total_size(4);

	if (attrs & (1 << SEG6_LOCAL_NH4))
		nlsize += nla_total_size(4);

	if (attrs & (1 << SEG6_LOCAL_NH6))
		nlsize += nla_total_size(16);

	if (attrs & (1 << SEG6_LOCAL_IIF))
		nlsize += nla_total_size(4);

	if (attrs & (1 << SEG6_LOCAL_OIF))
		nlsize += nla_total_size(4);

	if (attrs & (1 << SEG6_LOCAL_BPF))
		nlsize += nla_total_size(sizeof(struct nlattr)) +
			  nla_total_size(MAX_PROG_NAME) +
			  nla_total_size(4);

	return nlsize;
}

static int seg6_local_cmp_encap(struct lwtunnel_state *a,
				struct lwtunnel_state *b)
{
	struct seg6_local_lwt *slwt_a, *slwt_b;
	struct seg6_action_param *param;
	int i;

	slwt_a = seg6_local_lwtunnel(a);
	slwt_b = seg6_local_lwtunnel(b);

	if (slwt_a->action != slwt_b->action)
		return 1;

	if (slwt_a->desc->attrs != slwt_b->desc->attrs)
		return 1;

	for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
		if (slwt_a->desc->attrs & (1 << i)) {
			param = &seg6_action_params[i];
			if (param->cmp(slwt_a, slwt_b))
				return 1;
		}
	}

	return 0;
}

static const struct lwtunnel_encap_ops seg6_local_ops = {
	.build_state = seg6_local_build_state,
	.destroy_state = seg6_local_destroy_state,
	.input = seg6_local_input,
	.fill_encap = seg6_local_fill_encap,
	.get_encap_size = seg6_local_get_encap_size,
	.cmp_encap = seg6_local_cmp_encap,
	.owner = THIS_MODULE,
};

int __init seg6_local_init(void)
{
	return lwtunnel_encap_add_ops(&seg6_local_ops,
				      LWTUNNEL_ENCAP_SEG6_LOCAL);
}

void seg6_local_exit(void)
{
	lwtunnel_encap_del_ops(&seg6_local_ops, LWTUNNEL_ENCAP_SEG6_LOCAL);
}