/* XDP monitor tool, based on tracepoints
 *
 * Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") redirect_err_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = 2,
	/* TODO: have entries for all possible errno's */
};
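
/* Reading this map from user space: a lookup on a PERCPU_ARRAY map
 * returns one u64 per possible CPU, which the monitor must sum itself.
 * A minimal sketch, assuming the user-space bpf_map_lookup_elem() from
 * tools/lib/bpf and bpf_num_possible_cpus() from samples/bpf bpf_util.h;
 * map_fd is hypothetical wiring, not part of this file:
 *
 *	unsigned int nr_cpus = bpf_num_possible_cpus();
 *	__u64 values[nr_cpus], sum = 0;
 *	__u32 key = XDP_REDIRECT_ERROR;
 *
 *	if (bpf_map_lookup_elem(map_fd, &key, values) == 0)
 *		for (unsigned int i = 0; i < nr_cpus; i++)
 *			sum += values[i];
 */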

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
	int err;	//	offset:20; size:4; signed:1;
	int to_ifindex;	//	offset:24; size:4; signed:1;
	u32 map_id;	//	offset:28; size:4; signed:0;
	int map_index;	//	offset:32; size:4; signed:1;
};			// offset:36
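
/* The leading __pad covers the common tracepoint header fields
 * (common_type, common_flags, common_preempt_count, common_pid), which
 * occupy the first 8 bytes of every tracepoint record; the verifier
 * rejects reads below that offset in tracepoint programs.
 */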

enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};
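
/* These two values are the only valid keys into redirect_err_cnt,
 * matching its max_entries of 2 above.
 */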

static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
	u32 key = XDP_REDIRECT_ERROR;
	int err = ctx->err;
	u64 *cnt;

	if (!err)
		key = XDP_REDIRECT_SUCCESS;

	cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
	if (!cnt)
		return 0;
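	/* Per-CPU map: each CPU updates only its own copy of the value,
	 * so this non-atomic increment cannot race with other CPUs.
	 */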
	*cnt += 1;

	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well
	 * in practice, as stopping perf-record also unloads this
	 * bpf_prog. Plus, there is the additional overhead of doing so.
	 */
}

SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}
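
/* Loading sketch: the matching user-space tool in samples/bpf uses
 * load_bpf_file() from bpf_load.h, which attaches every
 * SEC("tracepoint/<sys>/<name>") program in this object to its
 * tracepoint. Hypothetical usage, not part of this file:
 *
 *	if (load_bpf_file("xdp_monitor_kern.o")) {
 *		printf("%s", bpf_log_buf);
 *		return 1;
 *	}
 */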