/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>

struct io_wq_work;

/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd:		corresponding file descriptor
 * @ctx:	pointer to a ring context structure
 * @sq_entries:	actual SQ size
 * @cq_entries:	actual CQ size
 * @flags:	SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing of io_uring creation, and provides the context pointer that
 * can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field(  int,		fd		)
		__field(  void *,	ctx		)
		__field(  u32,		sq_entries	)
		__field(  u32,		cq_entries	)
		__field(  u32,		flags		)
	),

	TP_fast_assign(
		__entry->fd		= fd;
		__entry->ctx		= ctx;
		__entry->sq_entries	= sq_entries;
		__entry->cq_entries	= cq_entries;
		__entry->flags		= flags;
	),

	TP_printk("ring %p, fd %d, sq size %u, cq size %u, flags %u",
		  __entry->ctx, __entry->fd, __entry->sq_entries,
		  __entry->cq_entries, __entry->flags)
);
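
/*
 * A minimal sketch of the expected firing point (the real call site lives in
 * fs/io_uring.c and may differ; "p" here is assumed to be the
 * struct io_uring_params passed to io_uring_setup(2), and "fd" the installed
 * descriptor):
 *
 *	trace_io_uring_create(fd, ctx, p->sq_entries, p->cq_entries,
 *			      p->flags);
 *
 * TRACE_EVENT() generates the trace_io_uring_create() helper with exactly
 * the TP_PROTO() signature above.
 */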

/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 *		       registered for a ring
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	describes which operation to perform
 * @nr_files:	number of registered files
 * @nr_bufs:	number of registered buffers
 * @eventfd:	whether an eventfd was registered or not
 * @ret:	return code
 *
 * Allows tracing of fixed files/buffers/eventfds that could be registered to
 * avoid the overhead of getting references to them for every operation. This
 * event, together with io_uring_file_get, can provide a full picture of how
 * much overhead one can reduce via fixing.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
		 unsigned nr_bufs, bool eventfd, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  unsigned,	opcode		)
		__field(  unsigned,	nr_files	)
		__field(  unsigned,	nr_bufs		)
		__field(  bool,		eventfd		)
		__field(  long,		ret		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->nr_files	= nr_files;
		__entry->nr_bufs	= nr_bufs;
		__entry->eventfd	= eventfd;
		__entry->ret		= ret;
	),

	TP_printk("ring %p, opcode %u, nr_user_files %u, nr_user_bufs %u, "
		  "eventfd %d, ret %ld",
		  __entry->ctx, __entry->opcode, __entry->nr_files,
		  __entry->nr_bufs, __entry->eventfd, __entry->ret)
);
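
/*
 * A usage sketch (assuming tracefs is mounted at /sys/kernel/tracing):
 * enabling this event together with io_uring_file_get shows how many
 * per-operation file references fixed registration avoids:
 *
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_register/enable
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_file_get/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */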

/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @ctx:	pointer to a ring context structure
 * @fd:		SQE file descriptor
 *
 * Allows tracing of how often an SQE file reference is obtained, which can
 * help in figuring out whether it makes sense to use fixed files, or in
 * checking that fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(void *ctx, int fd),

	TP_ARGS(ctx, fd),

	TP_STRUCT__entry (
		__field(  void *,	ctx	)
		__field(  int,		fd	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->fd	= fd;
	),

	TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
);

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @ctx:	pointer to a ring context structure
 * @rw:		type of workqueue, hashed or normal (nonzero means hashed)
 * @req:	pointer to a submitted request
 * @work:	pointer to a submitted io_wq_work
 * @flags:	request flags, recorded as-is
 *
 * Allows tracing of asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(void *ctx, int rw, void *req, struct io_wq_work *work,
		 unsigned int flags),

	TP_ARGS(ctx, rw, req, work, flags),

	TP_STRUCT__entry (
		__field(  void *,		ctx	)
		__field(  int,			rw	)
		__field(  void *,		req	)
		__field(  struct io_wq_work *,	work	)
		__field(  unsigned int,		flags	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->rw	= rw;
		__entry->req	= req;
		__entry->work	= work;
		__entry->flags	= flags;
	),

	TP_printk("ring %p, request %p, flags %u, %s queue, work %p",
		  __entry->ctx, __entry->req, __entry->flags,
		  __entry->rw ? "hashed" : "normal", __entry->work)
);
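
/*
 * A usage sketch: since "rw" is recorded as an event field, the standard
 * tracefs event-filter syntax can isolate hashed work (path assumes tracefs
 * mounted at /sys/kernel/tracing):
 *
 *	echo 'rw != 0' > \
 *	    /sys/kernel/tracing/events/io_uring/io_uring_queue_async_work/filter
 */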

/**
 * io_uring_defer - called before an io_uring request is added to defer_list
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a deferred request
 * @shadow:	whether the request is a shadow request or not
 *
 * Allows tracking of deferred requests, to get insight into which requests
 * are not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(void *ctx, void *req, bool shadow),

	TP_ARGS(ctx, req, shadow),

	TP_STRUCT__entry (
		__field(  void *,	ctx	)
		__field(  void *,	req	)
		__field(  bool,		shadow	)
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->req	= req;
		__entry->shadow	= shadow;
	),

	TP_printk("ring %p, request %p%s", __entry->ctx, __entry->req,
		  __entry->shadow ? ", shadow" : "")
);

/**
 * io_uring_link - called before an io_uring request is added to the
 *		   link_list of another request
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a linked request
 * @target_req:	pointer to a previous request, which will contain @req
 *
 * Allows tracking of linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(void *ctx, void *req, void *target_req),

	TP_ARGS(ctx, req, target_req),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  void *,	target_req	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->target_req	= target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
		  __entry->ctx, __entry->req, __entry->target_req)
);
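
/*
 * A submission sketch that would produce this event (hypothetical userspace
 * code using liburing-style helpers; error handling omitted): a read carrying
 * IOSQE_IO_LINK chains the following write after it.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	sqe->flags |= IOSQE_IO_LINK;	// next SQE runs only after this one
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_writev(sqe, fd, &iov, 1, 0);
 *	io_uring_submit(&ring);
 */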

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx:	pointer to a ring context structure
 * @min_events:	minimal number of events to wait for
 *
 * Allows tracking of waiting for a CQE, so that one can e.g. troubleshoot a
 * situation where an application waits for an event that never comes.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  int,		min_events	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->min_events	= min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req:	request whose links were cancelled
 * @link:	cancelled link
 *
 * Allows tracking of linked request cancellation, to see not only that some
 * work was cancelled, but also which request was the reason.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(void *req, void *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry (
		__field(  void *,	req	)
		__field(  void *,	link	)
	),

	TP_fast_assign(
		__entry->req	= req;
		__entry->link	= link;
	),

	TP_printk("request %p, link %p", __entry->req, __entry->link)
);

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx:	pointer to a ring context structure
 * @user_data:	user data associated with the request
 * @res:	result of the request
 *
 * Allows matching a completion back to its submission via @user_data.
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, u64 user_data, long res),

	TP_ARGS(ctx, user_data, res),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u64,		user_data	)
		__field(  long,		res		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->user_data	= user_data;
		__entry->res		= res;
	),

	TP_printk("ring %p, user_data 0x%llx, result %ld",
		  __entry->ctx, (unsigned long long)__entry->user_data,
		  __entry->res)
);
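
/*
 * A correlation sketch: both io_uring_submit_sqe (below) and this event
 * record the 64-bit user_data, so a request can be followed from submission
 * to completion. On the hypothetical userspace side, an SQE is tagged with a
 * cookie (the user_data field is part of the io_uring ABI):
 *
 *	sqe->user_data = (__u64)(uintptr_t)my_request_cookie;
 *
 * Matching submit and complete events with equal user_data then yields
 * per-request latency.
 */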

/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @ctx:	pointer to a ring context structure
 * @user_data:	user data associated with the request
 * @force_nonblock:	whether a non-blocking submission was forced or not
 * @sq_thread:	true if sq_thread has submitted this SQE
 *
 * Allows tracking of SQE submission, to understand its source: the SQ thread
 * or an io_uring_enter(2) call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(void *ctx, u64 user_data, bool force_nonblock, bool sq_thread),

	TP_ARGS(ctx, user_data, force_nonblock, sq_thread),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  u64,		user_data	)
		__field(  bool,		force_nonblock	)
		__field(  bool,		sq_thread	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->user_data	= user_data;
		__entry->force_nonblock	= force_nonblock;
		__entry->sq_thread	= sq_thread;
	),

	TP_printk("ring %p, user data 0x%llx, non block %d, sq_thread %d",
		  __entry->ctx, (unsigned long long)__entry->user_data,
		  __entry->force_nonblock, __entry->sq_thread)
);

#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>