/* XDP monitor tool, based on tracepoints
 *
 * Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

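/* Per-CPU counters of xdp_redirect events, keyed by XDP_REDIRECT_SUCCESS
 * or XDP_REDIRECT_ERROR (see enum below).
 */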
struct bpf_map_def SEC("maps") redirect_err_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = 2,
	/* TODO: have entries for all possible errno's */
};

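/* Per-CPU counters of xdp_exception events, keyed by the XDP action code
 * (XDP_ABORTED through XDP_REDIRECT), plus one slot for unknown actions.
 */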
#define XDP_UNKNOWN	(XDP_REDIRECT + 1)
struct bpf_map_def SEC("maps") exception_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = XDP_UNKNOWN + 1,
};

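/* Note: both maps are BPF_MAP_TYPE_PERCPU_ARRAY, so a userspace reader gets
 * one u64 per possible CPU for each key and has to sum them itself.
 * Illustrative sketch only (names like map_fd/nr_cpus are placeholders,
 * not the actual xdp_monitor user-space code):
 *
 *	__u64 values[nr_cpus], sum = 0;
 *
 *	bpf_map_lookup_elem(map_fd, &key, values);
 *	for (i = 0; i < nr_cpus; i++)
 *		sum += values[i];
 */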
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
	int err;	//	offset:20; size:4; signed:1;
	int to_ifindex;	//	offset:24; size:4; signed:1;
	u32 map_id;	//	offset:28; size:4; signed:0;
	int map_index;	//	offset:32; size:4; signed:1;
};			//	offset:36

enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};

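/* Common helper for the xdp_redirect* tracepoints: classify the event as
 * success or error based on ctx->err and bump the matching per-CPU counter.
 */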
static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
	u32 key = XDP_REDIRECT_ERROR;
	int err = ctx->err;
	u64 *cnt;

	if (!err)
		key = XDP_REDIRECT_SUCCESS;

	cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well in
	 * practice, as stopping perf-record also unloads this bpf_prog.
	 * Plus, there is additional overhead of doing so.
	 */
}

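/* The _err tracepoints only fire when a redirect fails, so attaching to
 * them adds no overhead on the successful fast-path.
 */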
SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_exception_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
};

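/* Count xdp_exception events (e.g. XDP_ABORTED or an action the driver
 * could not complete), indexed by the XDP action code.
 */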
SEC("tracepoint/xdp/xdp_exception")
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
{
	u64 *cnt;
	u32 key;

	key = ctx->act;
	if (key > XDP_REDIRECT)
		key = XDP_UNKNOWN;

	cnt = bpf_map_lookup_elem(&exception_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0;
}