/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>
#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/io_uring.h>

struct io_wq_work;

/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd:		corresponding file descriptor
 * @ctx:	pointer to a ring context structure
 * @sq_entries:	actual SQ size
 * @cq_entries:	actual CQ size
 * @flags:	SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing of io_uring creation and provides a pointer to the context
 * that can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field(int,	fd)
		__field(void *,	ctx)
		__field(u32,	sq_entries)
		__field(u32,	cq_entries)
		__field(u32,	flags)
	),

	TP_fast_assign(
		__entry->fd = fd;
		__entry->ctx = ctx;
		__entry->sq_entries = sq_entries;
		__entry->cq_entries = cq_entries;
		__entry->flags = flags;
	),

	TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x",
		  __entry->ctx, __entry->fd, __entry->sq_entries,
		  __entry->cq_entries, __entry->flags)
);

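/*
 * Illustrative note, not part of the original header: once tracing is built
 * in, this event can be enabled and read from tracefs (the mount point below
 * is the usual default and is an assumption here):
 *
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_create/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * Each line then follows the TP_printk() format above, for example:
 *
 *	ring 00000000c90d7a16, fd 4 sq size 8, cq size 16, flags 0x0
 *
 * (pointer values are hashed by %p, so they identify a ring only within one
 * trace, which is enough to correlate later events against it).
 */
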
/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 *		       registered for a ring
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	describes which operation to perform
 * @nr_files:	number of registered files
 * @nr_bufs:	number of registered buffers
 * @ret:	return code
 *
 * Allows tracing of fixed files/buffers, which can be registered to avoid
 * the overhead of taking references to them for every operation. This event,
 * together with io_uring_file_get, can provide a full picture of how much
 * overhead one can reduce via fixing.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
		 unsigned nr_bufs, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),

	TP_STRUCT__entry (
		__field(void *,		ctx)
		__field(unsigned,	opcode)
		__field(unsigned,	nr_files)
		__field(unsigned,	nr_bufs)
		__field(long,		ret)
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->opcode = opcode;
		__entry->nr_files = nr_files;
		__entry->nr_bufs = nr_bufs;
		__entry->ret = ret;
	),

	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
		  "ret %ld",
		  __entry->ctx, __entry->opcode, __entry->nr_files,
		  __entry->nr_bufs, __entry->ret)
);

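/*
 * Illustrative note, not part of the original header: to judge how much a
 * workload gains from registration, this event can be watched together with
 * io_uring_file_get below. Assuming tracefs at /sys/kernel/tracing:
 *
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_register/enable
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_file_get/enable
 *
 * If requests keep generating io_uring_file_get events after their files
 * were registered, they are likely not being submitted with IOSQE_FIXED_FILE
 * and are still paying the per-request lookup cost.
 */
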
/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @req:	pointer to a submitted request
 * @fd:		SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can help
 * in figuring out whether it makes sense to use fixed files, or in checking
 * that fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(struct io_kiocb *req, int fd),

	TP_ARGS(req, fd),

	TP_STRUCT__entry (
		__field(void *,	ctx)
		__field(void *,	req)
		__field(u64,	user_data)
		__field(int,	fd)
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->fd = fd;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
		  __entry->ctx, __entry->req, __entry->user_data, __entry->fd)
);

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @req:	pointer to a submitted request
 * @rw:		type of workqueue, hashed or normal
 *
 * Allows tracing of asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(struct io_kiocb *req, int rw),

	TP_ARGS(req, rw),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(u64,			user_data)
		__field(u8,			opcode)
		__field(unsigned int,		flags)
		__field(struct io_wq_work *,	work)
		__field(int,			rw)
		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->flags = req->flags;
		__entry->opcode = req->opcode;
		__entry->work = &req->work;
		__entry->rw = rw;
		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
);

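/*
 * Illustrative note, not part of the original header: the rw argument only
 * selects the wording of the output ("hashed queue" vs "normal queue").
 * Hashed work is serialized against other work hashed on the same key (for
 * example buffered writes to the same file), while normal work may run
 * concurrently on io-wq workers. To watch only hashed queueing, the rw field
 * can be used in a tracefs filter (the path is an assumption):
 *
 *	echo 'rw != 0' > \
 *		/sys/kernel/tracing/events/io_uring/io_uring_queue_async_work/filter
 */
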
/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @req:	pointer to a deferred request
 *
 * Allows tracking of deferred requests, to get insight into which requests
 * are not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(struct io_kiocb *req),

	TP_ARGS(req),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	data)
		__field(u8,			opcode)
		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->data = req->cqe.user_data;
		__entry->opcode = req->opcode;
		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
		  __entry->ctx, __entry->req, __entry->data,
		  __get_str(op_str))
);

/**
 * io_uring_link - called before the io_uring request is added to the
 *		   link_list of another request
 *
 * @req:	pointer to a linked request
 * @target_req:	pointer to a previous request, which will contain @req
 *
 * Allows tracking of linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req),

	TP_ARGS(req, target_req),

	TP_STRUCT__entry (
		__field(void *,	ctx)
		__field(void *,	req)
		__field(void *,	target_req)
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->target_req = target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
		  __entry->ctx, __entry->req, __entry->target_req)
);

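/*
 * Illustrative userspace sketch, an assumption and not part of this header:
 * the linked requests traced here are built by setting IOSQE_IO_LINK on all
 * but the last SQE of a chain, for example with liburing:
 *
 *	struct io_uring_sqe *sqe;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, out_fd, buf, sizeof(buf), 0);
 *
 *	io_uring_submit(&ring);
 *
 * The write only starts after the read succeeds, and the trace shows the
 * second request "linked after" the first.
 */
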
/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx:	pointer to a ring context structure
 * @min_events:	minimal number of events to wait for
 *
 * Allows tracking of waiting for a CQE, so that we can e.g. troubleshoot
 * situations where an application waits for an event that never comes.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field(void *,	ctx)
		__field(int,	min_events)
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->min_events = min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);

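/*
 * Illustrative note, an assumption and not part of this header: this event
 * typically fires when userspace asks the kernel to wait for completions,
 * e.g. io_uring_enter(2) with IORING_ENTER_GETEVENTS and a non-zero
 * min_complete, or the liburing wrappers:
 *
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *
 * A ring that shows io_uring_cqring_wait without a matching io_uring_complete
 * is a good hint that the awaited event is never being generated.
 */
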
/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req:	request whose links were cancelled
 * @link:	cancelled link
 *
 * Allows tracking of linked request cancellation, to see not only that some
 * work was cancelled, but also which request was the reason.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(struct io_kiocb *req, struct io_kiocb *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	user_data)
		__field(u8,			opcode)
		__field(void *,			link)
		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->opcode = req->opcode;
		__entry->link = link;
		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str), __entry->link)
);

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a submitted request
 * @user_data:	user data associated with the request
 * @res:	result of the request
 * @cflags:	completion flags
 * @extra1:	extra 64-bit data for CQE32
 * @extra2:	extra 64-bit data for CQE32
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
		 u64 extra1, u64 extra2),

	TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),

	TP_STRUCT__entry (
		__field(void *,		ctx)
		__field(void *,		req)
		__field(u64,		user_data)
		__field(int,		res)
		__field(unsigned,	cflags)
		__field(u64,		extra1)
		__field(u64,		extra2)
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->user_data = user_data;
		__entry->res = res;
		__entry->cflags = cflags;
		__entry->extra1 = extra1;
		__entry->extra2 = extra2;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
		  "extra1 %llu extra2 %llu",
		  __entry->ctx, __entry->req,
		  __entry->user_data,
		  __entry->res, __entry->cflags,
		  (unsigned long long) __entry->extra1,
		  (unsigned long long) __entry->extra2)
);

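/*
 * Illustrative note, not part of the original header: user_data is the value
 * the application stored in the SQE, so submission and completion can be
 * correlated by matching it (or the req pointer) across events, e.g.:
 *
 *	io_uring_submit_sqe: ring ..., req ..., user_data 0x42, opcode READ, ...
 *	io_uring_complete:   ring ..., req ..., user_data 0x42, result 4096, ...
 *
 * The extra1/extra2 fields are only meaningful on rings created with
 * IORING_SETUP_CQE32.
 */
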
/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @req:		pointer to a submitted request
 * @force_nonblock:	whether the submission was forced to be non-blocking
 *
 * Allows tracking of SQE submission, to understand its source: the SQ thread
 * or an io_uring_enter(2) call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(struct io_kiocb *req, bool force_nonblock),

	TP_ARGS(req, force_nonblock),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	user_data)
		__field(u8,			opcode)
		__field(u32,			flags)
		__field(bool,			force_nonblock)
		__field(bool,			sq_thread)
		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->opcode = req->opcode;
		__entry->flags = req->flags;
		__entry->force_nonblock = force_nonblock;
		__entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL;
		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
		  __entry->user_data, __get_str(op_str),
		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
);

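/*
 * Illustrative sketch, an assumption and not part of this header: sq_thread
 * above simply reflects whether the ring was created with
 * IORING_SETUP_SQPOLL, for example via liburing:
 *
 *	struct io_uring_params p = {
 *		.flags = IORING_SETUP_SQPOLL,
 *		.sq_thread_idle = 2000,
 *	};
 *	struct io_uring ring;
 *
 *	io_uring_queue_init_params(8, &ring, &p);
 *
 * With SQPOLL the submissions traced here are driven by the kernel SQ thread
 * rather than by the task calling io_uring_enter(2).
 */
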
/*
 * io_uring_poll_arm - called after arming a poll wait if successful
 *
 * @req:	pointer to the armed request
 * @mask:	request poll events mask
 * @events:	registered events of interest
 *
 * Allows tracking which fds are being waited on and which events are of
 * interest.
 */
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(struct io_kiocb *req, int mask, int events),

	TP_ARGS(req, mask, events),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	user_data)
		__field(u8,			opcode)
		__field(int,			mask)
		__field(int,			events)
		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->opcode = req->opcode;
		__entry->mask = mask;
		__entry->events = events;
		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->mask, __entry->events)
);

/*
 * io_uring_task_add - called after adding a task
 *
 * @req:	pointer to request
 * @mask:	request poll events mask
 */
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(struct io_kiocb *req, int mask),

	TP_ARGS(req, mask),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	user_data)
		__field(u8,			opcode)
		__field(int,			mask)
		__string(op_str, io_uring_get_opcode(req->opcode))
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = req->cqe.user_data;
		__entry->opcode = req->opcode;
		__entry->mask = mask;
		__assign_str(op_str, io_uring_get_opcode(req->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->mask)
);

/*
 * io_uring_req_failed - called when an SQE is errored during submission
 *
 * @sqe:	pointer to the io_uring_sqe that failed
 * @req:	pointer to request
 * @error:	error it failed with
 *
 * Allows easier diagnosing of malformed requests in production systems.
 */
TRACE_EVENT(io_uring_req_failed,

	TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),

	TP_ARGS(sqe, req, error),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(void *,			req)
		__field(unsigned long long,	user_data)
		__field(u8,			opcode)
		__field(u8,			flags)
		__field(u8,			ioprio)
		__field(u64,			off)
		__field(u64,			addr)
		__field(u32,			len)
		__field(u32,			op_flags)
		__field(u16,			buf_index)
		__field(u16,			personality)
		__field(u32,			file_index)
		__field(u64,			pad1)
		__field(u64,			addr3)
		__field(int,			error)
		__string(op_str, io_uring_get_opcode(sqe->opcode))
	),

	TP_fast_assign(
		__entry->ctx = req->ctx;
		__entry->req = req;
		__entry->user_data = sqe->user_data;
		__entry->opcode = sqe->opcode;
		__entry->flags = sqe->flags;
		__entry->ioprio = sqe->ioprio;
		__entry->off = sqe->off;
		__entry->addr = sqe->addr;
		__entry->len = sqe->len;
		__entry->op_flags = sqe->poll32_events;
		__entry->buf_index = sqe->buf_index;
		__entry->personality = sqe->personality;
		__entry->file_index = sqe->file_index;
		__entry->pad1 = sqe->__pad2[0];
		__entry->addr3 = sqe->addr3;
		__entry->error = error;
		__assign_str(op_str, io_uring_get_opcode(sqe->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, "
		  "opcode %s, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
		  "len=%u, rw_flags=0x%x, buf_index=%d, "
		  "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, "
		  "error=%d",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->flags, __entry->ioprio,
		  (unsigned long long)__entry->off,
		  (unsigned long long) __entry->addr, __entry->len,
		  __entry->op_flags,
		  __entry->buf_index, __entry->personality, __entry->file_index,
		  (unsigned long long) __entry->pad1,
		  (unsigned long long) __entry->addr3, __entry->error)
);

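/*
 * Illustrative note, an assumption and not part of this header: a common
 * trigger is an SQE that sets fields its opcode does not accept, which the
 * prep handlers reject with -EINVAL during submission; such a request shows
 * up here with error=-22 and its raw SQE fields dumped for inspection. In
 * production it is usually enough to enable just this event (tracefs path is
 * an assumption):
 *
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_req_failed/enable
 */
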
/*
 * io_uring_cqe_overflow - a CQE overflowed
 *
 * @ctx:	pointer to a ring context structure
 * @user_data:	user data associated with the request
 * @res:	CQE result
 * @cflags:	CQE flags
 * @ocqe:	pointer to the overflow cqe (if available)
 */
TRACE_EVENT(io_uring_cqe_overflow,

	TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags,
		 void *ocqe),

	TP_ARGS(ctx, user_data, res, cflags, ocqe),

	TP_STRUCT__entry (
		__field(void *,			ctx)
		__field(unsigned long long,	user_data)
		__field(s32,			res)
		__field(u32,			cflags)
		__field(void *,			ocqe)
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->user_data = user_data;
		__entry->res = res;
		__entry->cflags = cflags;
		__entry->ocqe = ocqe;
	),

	TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, "
		  "overflow_cqe %p",
		  __entry->ctx, __entry->user_data, __entry->res,
		  __entry->cflags, __entry->ocqe)
);

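/*
 * Illustrative note, an assumption and not part of this header: this fires
 * when a completion is posted while the CQ ring is full; the CQE is parked
 * on an internal overflow list (the ocqe pointer above, NULL if even that
 * allocation failed) and is flushed once userspace frees up CQ space. A
 * steady stream of these events usually means completions are not being
 * reaped fast enough or the CQ size chosen at io_uring_setup(2) is too small.
 */
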
/*
 * io_uring_task_work_run - ran task work
 *
 * @tctx:	pointer to an io_uring_task
 * @count:	how many functions it ran
 * @loops:	how many loops it ran
 */
TRACE_EVENT(io_uring_task_work_run,

	TP_PROTO(void *tctx, unsigned int count, unsigned int loops),

	TP_ARGS(tctx, count, loops),

	TP_STRUCT__entry (
		__field(void *,		tctx)
		__field(unsigned int,	count)
		__field(unsigned int,	loops)
	),

	TP_fast_assign(
		__entry->tctx = tctx;
		__entry->count = count;
		__entry->loops = loops;
	),

	TP_printk("tctx %p, count %u, loops %u",
		  __entry->tctx, __entry->count, __entry->loops)
);

TRACE_EVENT(io_uring_short_write,

	TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got),

	TP_ARGS(ctx, fpos, wanted, got),

	TP_STRUCT__entry(
		__field(void *,	ctx)
		__field(u64,	fpos)
		__field(u64,	wanted)
		__field(u64,	got)
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->fpos = fpos;
		__entry->wanted = wanted;
		__entry->got = got;
	),

	TP_printk("ring %p, fpos %lld, wanted %lld, got %lld",
		  __entry->ctx, __entry->fpos,
		  __entry->wanted, __entry->got)
);

/*
 * io_uring_local_work_run - ran ring local task work
 *
 * @ctx:	pointer to an io_ring_ctx
 * @count:	how many functions it ran
 * @loops:	how many loops it ran
 */
TRACE_EVENT(io_uring_local_work_run,

	TP_PROTO(void *ctx, int count, unsigned int loops),

	TP_ARGS(ctx, count, loops),

	TP_STRUCT__entry (
		__field(void *,		ctx)
		__field(int,		count)
		__field(unsigned int,	loops)
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->count = count;
		__entry->loops = loops;
	),

	TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops)
);

#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>