// SPDX-License-Identifier: GPL-2.0
#ifndef _XDP_SAMPLE_BPF_H
#define _XDP_SAMPLE_BPF_H

#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>

#include "xdp_sample_shared.h"

#define ETH_ALEN	6
#define ETH_P_802_3_MIN	0x0600
#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8
#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD
#define ETH_P_ARP	0x0806
#define IPPROTO_ICMPV6	58

#define EINVAL		22
#define ENETDOWN	100
#define EMSGSIZE	90
#define EOPNOTSUPP	95
#define ENOSPC		28

typedef struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(map_flags, BPF_F_MMAPABLE);
	__type(key, unsigned int);
	__type(value, struct datarec);
} array_map;

extern array_map rx_cnt;
extern const volatile int nr_cpus;

enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};
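
/* Usage sketch (illustrative, not part of the original header): tracepoint
 * handlers typically fold the err value reported for a redirect into one of
 * the two buckets above before bumping the matching counter. The function
 * name is hypothetical.
 */
static __always_inline int redirect_result_example(int err)
{
	return err ? XDP_REDIRECT_ERROR : XDP_REDIRECT_SUCCESS;
}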

/* Swap the 6-byte Ethernet destination and source MAC addresses in place.
 * The caller must have verified that at least 2 * ETH_ALEN bytes starting
 * at data are accessible.
 */
static __always_inline void swap_src_dst_mac(void *data)
{
	unsigned short *p = data;
	unsigned short dst[3];

	dst[0] = p[0];
	dst[1] = p[1];
	dst[2] = p[2];
	p[0] = p[3];
	p[1] = p[4];
	p[2] = p[5];
	p[3] = dst[0];
	p[4] = dst[1];
	p[5] = dst[2];
}
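
/* Usage sketch (illustrative, not part of the original header): an XDP
 * program calling swap_src_dst_mac() must first prove to the verifier that
 * both MAC addresses (2 * ETH_ALEN bytes) lie within the packet. The
 * function name is hypothetical.
 */
static __always_inline int xdp_bounce_example(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;

	/* Explicit bounds check before touching the 12 bytes of MACs. */
	if (data + ETH_ALEN * 2 > data_end)
		return XDP_DROP;

	swap_src_dst_mac(data);
	return XDP_TX; /* bounce the frame back out the ingress interface */
}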

#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
	__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define bpf_ntohs(x)	__builtin_bswap16(x)
#define bpf_htons(x)	__builtin_bswap16(x)
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
	__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define bpf_ntohs(x)	(x)
#define bpf_htons(x)	(x)
#else
# error "Endianness detection needs to be set up for your compiler?!"
#endif
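
/* Usage sketch (illustrative, not part of the original header): h_proto in
 * struct ethhdr (from vmlinux.h) is stored in network byte order, so it is
 * converted with bpf_ntohs() before comparison against the host-order
 * ETH_P_* constants defined above. The function name is hypothetical.
 */
static __always_inline int eth_is_ip_example(const struct ethhdr *eth)
{
	__u16 proto = bpf_ntohs(eth->h_proto);

	return proto == ETH_P_IP || proto == ETH_P_IPV6;
}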

/*
 * Note: including linux/compiler.h or linux/kernel.h for the macros below
 * conflicts with the vmlinux.h include in BPF files, so we define them here.
 *
 * The following functions are taken from kernel sources and
 * break aliasing rules in their original form.
 *
 * While the kernel is compiled with -fno-strict-aliasing,
 * perf uses -Wstrict-aliasing=3, which makes the build fail
 * under gcc 4.4.
 *
 * Using an extra __may_alias__ type allows aliasing
 * in this case.
 */
typedef __u8  __attribute__((__may_alias__)) __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		asm volatile ("" : : : "memory");
		__builtin_memcpy((void *)res, (const void *)p, size);
		asm volatile ("" : : : "memory");
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		asm volatile ("" : : : "memory");
		__builtin_memcpy((void *)p, (const void *)res, size);
		asm volatile ("" : : : "memory");
	}
}

#define READ_ONCE(x)						\
({								\
	union { typeof(x) __val; char __c[1]; } __u =		\
		{ .__c = { 0 } };				\
	__read_once_size(&(x), __u.__c, sizeof(x));		\
	__u.__val;						\
})

#define WRITE_ONCE(x, val)					\
({								\
	union { typeof(x) __val; char __c[1]; } __u =		\
		{ .__val = (val) };				\
	__write_once_size(&(x), __u.__c, sizeof(x));		\
	__u.__val;						\
})

/* Add a value using relaxed read and relaxed write. Less expensive than
 * fetch_add when there is no write concurrency.
 */
#define NO_TEAR_ADD(x, val)	WRITE_ONCE((x), READ_ONCE(x) + (val))
#define NO_TEAR_INC(x)		NO_TEAR_ADD((x), 1)
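
/* Usage sketch (illustrative, not part of the original header): each CPU
 * owns its own slot of the rx_cnt array (keyed by CPU id), so a relaxed
 * NO_TEAR_INC suffices and is cheaper than an atomic fetch-add. The
 * 'processed' field is assumed to exist in struct datarec from
 * xdp_sample_shared.h; the function name is hypothetical.
 */
static __always_inline void count_rx_example(void)
{
	__u32 key = bpf_get_smp_processor_id();
	struct datarec *rec;

	rec = bpf_map_lookup_elem(&rx_cnt, &key);
	if (!rec)
		return;
	NO_TEAR_INC(rec->processed);
}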

#define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))

#endif /* _XDP_SAMPLE_BPF_H */