/* SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program shows how to use bpf_xdp_adjust_tail() by generating an
 * ICMPv4 "packet too big" reply (to be more precise for v4: destination
 * unreachable, DF bit set, fragmentation needed) whenever a received
 * packet is bigger than 600 bytes.
 */
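/* Not part of the original sample: a rough usage sketch. The object is
 * normally loaded by the matching user-space loader built from samples/bpf,
 * but iproute2 can also attach it directly, e.g. something like the
 * following (interface and object file names are placeholders):
 *
 *	ip link set dev eth0 xdp obj xdp_adjust_tail_kern.o sec xdp_icmp
 */
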
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <bpf/bpf_helpers.h>

#define DEFAULT_TTL 64
#define MAX_PCKT_SIZE 600
#define ICMP_TOOBIG_SIZE 98
#define ICMP_TOOBIG_PAYLOAD_SIZE 92

/* volatile to prevent compiler optimizations */
static volatile __u32 max_pcktsz = MAX_PCKT_SIZE;

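/* Single-slot array map counting how many ICMP "too big" replies this
 * program has generated; user space can read it to report statistics.
 */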
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, __u32);
	__type(value, __u64);
	__uint(max_entries, 1);
} icmpcnt SEC(".maps");

static __always_inline void count_icmp(void)
{
	u64 key = 0;
	u64 *icmp_count;

	icmp_count = bpf_map_lookup_elem(&icmpcnt, &key);
	if (icmp_count)
		*icmp_count += 1;
}

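/* Write a fresh Ethernet header at @data with the source and destination
 * MAC addresses of @orig_eth swapped, so the frame heads back towards the
 * original sender; the EtherType is carried over unchanged.
 */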
static __always_inline void swap_mac(void *data, struct ethhdr *orig_eth)
{
	struct ethhdr *eth;

	eth = data;
	memcpy(eth->h_source, orig_eth->h_dest, ETH_ALEN);
	memcpy(eth->h_dest, orig_eth->h_source, ETH_ALEN);
	eth->h_proto = orig_eth->h_proto;
}

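/* Fold the 32-bit partial checksum produced by bpf_csum_diff() into the
 * 16-bit one's-complement value expected by the IP and ICMP header
 * checksum fields.
 */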
static __always_inline __u16 csum_fold_helper(__u32 csum)
{
	return ~((csum & 0xffff) + (csum >> 16));
}

static __always_inline void ipv4_csum(void *data_start, int data_size,
				      __u32 *csum)
{
	*csum = bpf_csum_diff(0, 0, data_start, data_size, *csum);
	*csum = csum_fold_helper(*csum);
}

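/* Turn the (already tail-trimmed) offending packet into an ICMPv4
 * "fragmentation needed" reply in place: grow headroom with
 * bpf_xdp_adjust_head(), build new Ethernet, IPv4 and ICMP headers in
 * front of the truncated original IP packet (which becomes the ICMP
 * payload), and bounce the result back out with XDP_TX.
 */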
static __always_inline int send_icmp4_too_big(struct xdp_md *xdp)
{
	int headroom = (int)sizeof(struct iphdr) + (int)sizeof(struct icmphdr);

	if (bpf_xdp_adjust_head(xdp, 0 - headroom))
		return XDP_DROP;
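	/* bpf_xdp_adjust_head() invalidates all previously derived packet
	 * pointers, so data/data_end must be re-read and the new length
	 * re-validated before anything is dereferenced.
	 */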
	void *data = (void *)(long)xdp->data;
	void *data_end = (void *)(long)xdp->data_end;

	if (data + (ICMP_TOOBIG_SIZE + headroom) > data_end)
		return XDP_DROP;

	struct iphdr *iph, *orig_iph;
	struct icmphdr *icmp_hdr;
	struct ethhdr *orig_eth;
	__u32 csum = 0;
	__u64 off = 0;

	orig_eth = data + headroom;
	swap_mac(data, orig_eth);
	off += sizeof(struct ethhdr);
	iph = data + off;
	off += sizeof(struct iphdr);
	icmp_hdr = data + off;
	off += sizeof(struct icmphdr);
	orig_iph = data + off;
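	/* Fill in the ICMP "destination unreachable / fragmentation needed"
	 * header: the advertised MTU is the configured maximum packet size
	 * minus the Ethernet header, and the checksum covers the ICMP header
	 * plus the embedded original packet data.
	 */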
	icmp_hdr->type = ICMP_DEST_UNREACH;
	icmp_hdr->code = ICMP_FRAG_NEEDED;
	icmp_hdr->un.frag.mtu = htons(max_pcktsz - sizeof(struct ethhdr));
	icmp_hdr->checksum = 0;
	ipv4_csum(icmp_hdr, ICMP_TOOBIG_PAYLOAD_SIZE, &csum);
	icmp_hdr->checksum = csum;
	iph->ttl = DEFAULT_TTL;
	iph->daddr = orig_iph->saddr;
	iph->saddr = orig_iph->daddr;
	iph->version = 4;
	iph->ihl = 5;
	iph->protocol = IPPROTO_ICMP;
	iph->tos = 0;
	iph->tot_len = htons(
		ICMP_TOOBIG_SIZE + headroom - sizeof(struct ethhdr));
	iph->check = 0;
	csum = 0;
	ipv4_csum(iph, sizeof(struct iphdr), &csum);
	iph->check = csum;
	count_icmp();
	return XDP_TX;
}

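/* If an IPv4 packet exceeds both the configured maximum and
 * ICMP_TOOBIG_SIZE, trim its tail down to ICMP_TOOBIG_SIZE bytes with
 * bpf_xdp_adjust_tail() and answer with an ICMP "fragmentation needed"
 * reply; everything else passes up the stack untouched.
 */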
static __always_inline int handle_ipv4(struct xdp_md *xdp)
{
	void *data_end = (void *)(long)xdp->data_end;
	void *data = (void *)(long)xdp->data;
	int pckt_size = data_end - data;
	int offset;

	if (pckt_size > max(max_pcktsz, ICMP_TOOBIG_SIZE)) {
		offset = pckt_size - ICMP_TOOBIG_SIZE;
		if (bpf_xdp_adjust_tail(xdp, 0 - offset))
			return XDP_PASS;
		return send_icmp4_too_big(xdp);
	}
	return XDP_PASS;
}

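/* Program entry point: validate the Ethernet header and hand IPv4 packets
 * to handle_ipv4(); all other traffic is passed up the stack unmodified.
 */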
SEC("xdp_icmp")
int _xdp_icmp(struct xdp_md *xdp)
{
	void *data_end = (void *)(long)xdp->data_end;
	void *data = (void *)(long)xdp->data;
	struct ethhdr *eth = data;
	__u16 h_proto;

	if (eth + 1 > data_end)
		return XDP_DROP;

	h_proto = eth->h_proto;

	if (h_proto == htons(ETH_P_IP))
		return handle_ipv4(xdp);
	else
		return XDP_PASS;
}

char _license[] SEC("license") = "GPL";