  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #undef TRACE_SYSTEM
  3. #define TRACE_SYSTEM writeback
  4. #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
  5. #define _TRACE_WRITEBACK_H
  6. #include <linux/tracepoint.h>
  7. #include <linux/backing-dev.h>
  8. #include <linux/writeback.h>
  9. #define show_inode_state(state) \
  10. __print_flags(state, "|", \
  11. {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \
  12. {I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \
  13. {I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \
  14. {I_NEW, "I_NEW"}, \
  15. {I_WILL_FREE, "I_WILL_FREE"}, \
  16. {I_FREEING, "I_FREEING"}, \
  17. {I_CLEAR, "I_CLEAR"}, \
  18. {I_SYNC, "I_SYNC"}, \
  19. {I_DIRTY_TIME, "I_DIRTY_TIME"}, \
  20. {I_REFERENCED, "I_REFERENCED"} \
  21. )
  22. /* enums need to be exported to user space */
  23. #undef EM
  24. #undef EMe
  25. #define EM(a,b) TRACE_DEFINE_ENUM(a);
  26. #define EMe(a,b) TRACE_DEFINE_ENUM(a);
  27. #define WB_WORK_REASON \
  28. EM( WB_REASON_BACKGROUND, "background") \
  29. EM( WB_REASON_VMSCAN, "vmscan") \
  30. EM( WB_REASON_SYNC, "sync") \
  31. EM( WB_REASON_PERIODIC, "periodic") \
  32. EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
  33. EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \
  34. EM( WB_REASON_FORKER_THREAD, "forker_thread") \
  35. EMe(WB_REASON_FOREIGN_FLUSH, "foreign_flush")
  36. WB_WORK_REASON
  37. /*
  38. * Now redefine the EM() and EMe() macros to map the enums to the strings
  39. * that will be printed in the output.
  40. */
  41. #undef EM
  42. #undef EMe
  43. #define EM(a,b) { a, b },
  44. #define EMe(a,b) { a, b }
  45. struct wb_writeback_work;
  46. DECLARE_EVENT_CLASS(writeback_folio_template,
  47. TP_PROTO(struct folio *folio, struct address_space *mapping),
  48. TP_ARGS(folio, mapping),
  49. TP_STRUCT__entry (
  50. __array(char, name, 32)
  51. __field(ino_t, ino)
  52. __field(pgoff_t, index)
  53. ),
  54. TP_fast_assign(
  55. strscpy_pad(__entry->name,
  56. bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
  57. NULL), 32);
  58. __entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
  59. __entry->index = folio->index;
  60. ),
  61. TP_printk("bdi %s: ino=%lu index=%lu",
  62. __entry->name,
  63. (unsigned long)__entry->ino,
  64. __entry->index
  65. )
  66. );
  67. DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,
  68. TP_PROTO(struct folio *folio, struct address_space *mapping),
  69. TP_ARGS(folio, mapping)
  70. );
  71. DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,
  72. TP_PROTO(struct folio *folio, struct address_space *mapping),
  73. TP_ARGS(folio, mapping)
  74. );
  75. DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
  76. TP_PROTO(struct inode *inode, int flags),
  77. TP_ARGS(inode, flags),
  78. TP_STRUCT__entry (
  79. __array(char, name, 32)
  80. __field(ino_t, ino)
  81. __field(unsigned long, state)
  82. __field(unsigned long, flags)
  83. ),
  84. TP_fast_assign(
  85. struct backing_dev_info *bdi = inode_to_bdi(inode);
  86. /* may be called for files on pseudo FSes w/ unregistered bdi */
  87. strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
  88. __entry->ino = inode->i_ino;
  89. __entry->state = inode->i_state;
  90. __entry->flags = flags;
  91. ),
  92. TP_printk("bdi %s: ino=%lu state=%s flags=%s",
  93. __entry->name,
  94. (unsigned long)__entry->ino,
  95. show_inode_state(__entry->state),
  96. show_inode_state(__entry->flags)
  97. )
  98. );
  99. DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,
  100. TP_PROTO(struct inode *inode, int flags),
  101. TP_ARGS(inode, flags)
  102. );
  103. DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
  104. TP_PROTO(struct inode *inode, int flags),
  105. TP_ARGS(inode, flags)
  106. );
  107. DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
  108. TP_PROTO(struct inode *inode, int flags),
  109. TP_ARGS(inode, flags)
  110. );
  111. #ifdef CREATE_TRACE_POINTS
  112. #ifdef CONFIG_CGROUP_WRITEBACK
  113. static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
  114. {
  115. return cgroup_ino(wb->memcg_css->cgroup);
  116. }
  117. static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
  118. {
  119. if (wbc->wb)
  120. return __trace_wb_assign_cgroup(wbc->wb);
  121. else
  122. return 1;
  123. }
  124. #else /* CONFIG_CGROUP_WRITEBACK */
  125. static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
  126. {
  127. return 1;
  128. }
  129. static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
  130. {
  131. return 1;
  132. }
  133. #endif /* CONFIG_CGROUP_WRITEBACK */
  134. #endif /* CREATE_TRACE_POINTS */
  135. #ifdef CONFIG_CGROUP_WRITEBACK
  136. TRACE_EVENT(inode_foreign_history,
  137. TP_PROTO(struct inode *inode, struct writeback_control *wbc,
  138. unsigned int history),
  139. TP_ARGS(inode, wbc, history),
  140. TP_STRUCT__entry(
  141. __array(char, name, 32)
  142. __field(ino_t, ino)
  143. __field(ino_t, cgroup_ino)
  144. __field(unsigned int, history)
  145. ),
  146. TP_fast_assign(
  147. strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
  148. __entry->ino = inode->i_ino;
  149. __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
  150. __entry->history = history;
  151. ),
  152. TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
  153. __entry->name,
  154. (unsigned long)__entry->ino,
  155. (unsigned long)__entry->cgroup_ino,
  156. __entry->history
  157. )
  158. );
  159. TRACE_EVENT(inode_switch_wbs,
  160. TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
  161. struct bdi_writeback *new_wb),
  162. TP_ARGS(inode, old_wb, new_wb),
  163. TP_STRUCT__entry(
  164. __array(char, name, 32)
  165. __field(ino_t, ino)
  166. __field(ino_t, old_cgroup_ino)
  167. __field(ino_t, new_cgroup_ino)
  168. ),
  169. TP_fast_assign(
  170. strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
  171. __entry->ino = inode->i_ino;
  172. __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
  173. __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
  174. ),
  175. TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
  176. __entry->name,
  177. (unsigned long)__entry->ino,
  178. (unsigned long)__entry->old_cgroup_ino,
  179. (unsigned long)__entry->new_cgroup_ino
  180. )
  181. );
  182. TRACE_EVENT(track_foreign_dirty,
  183. TP_PROTO(struct folio *folio, struct bdi_writeback *wb),
  184. TP_ARGS(folio, wb),
  185. TP_STRUCT__entry(
  186. __array(char, name, 32)
  187. __field(u64, bdi_id)
  188. __field(ino_t, ino)
  189. __field(unsigned int, memcg_id)
  190. __field(ino_t, cgroup_ino)
  191. __field(ino_t, page_cgroup_ino)
  192. ),
  193. TP_fast_assign(
  194. struct address_space *mapping = folio_mapping(folio);
  195. struct inode *inode = mapping ? mapping->host : NULL;
  196. strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
  197. __entry->bdi_id = wb->bdi->id;
  198. __entry->ino = inode ? inode->i_ino : 0;
  199. __entry->memcg_id = wb->memcg_css->id;
  200. __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
  201. __entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
  202. ),
  203. TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
  204. __entry->name,
  205. __entry->bdi_id,
  206. (unsigned long)__entry->ino,
  207. __entry->memcg_id,
  208. (unsigned long)__entry->cgroup_ino,
  209. (unsigned long)__entry->page_cgroup_ino
  210. )
  211. );
  212. TRACE_EVENT(flush_foreign,
  213. TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
  214. unsigned int frn_memcg_id),
  215. TP_ARGS(wb, frn_bdi_id, frn_memcg_id),
  216. TP_STRUCT__entry(
  217. __array(char, name, 32)
  218. __field(ino_t, cgroup_ino)
  219. __field(unsigned int, frn_bdi_id)
  220. __field(unsigned int, frn_memcg_id)
  221. ),
  222. TP_fast_assign(
  223. strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
  224. __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
  225. __entry->frn_bdi_id = frn_bdi_id;
  226. __entry->frn_memcg_id = frn_memcg_id;
  227. ),
  228. TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
  229. __entry->name,
  230. (unsigned long)__entry->cgroup_ino,
  231. __entry->frn_bdi_id,
  232. __entry->frn_memcg_id
  233. )
  234. );
  235. #endif
  236. DECLARE_EVENT_CLASS(writeback_write_inode_template,
  237. TP_PROTO(struct inode *inode, struct writeback_control *wbc),
  238. TP_ARGS(inode, wbc),
  239. TP_STRUCT__entry (
  240. __array(char, name, 32)
  241. __field(ino_t, ino)
  242. __field(int, sync_mode)
  243. __field(ino_t, cgroup_ino)
  244. ),
  245. TP_fast_assign(
  246. strscpy_pad(__entry->name,
  247. bdi_dev_name(inode_to_bdi(inode)), 32);
  248. __entry->ino = inode->i_ino;
  249. __entry->sync_mode = wbc->sync_mode;
  250. __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
  251. ),
  252. TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
  253. __entry->name,
  254. (unsigned long)__entry->ino,
  255. __entry->sync_mode,
  256. (unsigned long)__entry->cgroup_ino
  257. )
  258. );
  259. DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,
  260. TP_PROTO(struct inode *inode, struct writeback_control *wbc),
  261. TP_ARGS(inode, wbc)
  262. );
  263. DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,
  264. TP_PROTO(struct inode *inode, struct writeback_control *wbc),
  265. TP_ARGS(inode, wbc)
  266. );
  267. DECLARE_EVENT_CLASS(writeback_work_class,
  268. TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
  269. TP_ARGS(wb, work),
  270. TP_STRUCT__entry(
  271. __array(char, name, 32)
  272. __field(long, nr_pages)
  273. __field(dev_t, sb_dev)
  274. __field(int, sync_mode)
  275. __field(int, for_kupdate)
  276. __field(int, range_cyclic)
  277. __field(int, for_background)
  278. __field(int, reason)
  279. __field(ino_t, cgroup_ino)
  280. ),
  281. TP_fast_assign(
  282. strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
  283. __entry->nr_pages = work->nr_pages;
  284. __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
  285. __entry->sync_mode = work->sync_mode;
  286. __entry->for_kupdate = work->for_kupdate;
  287. __entry->range_cyclic = work->range_cyclic;
  288. __entry->for_background = work->for_background;
  289. __entry->reason = work->reason;
  290. __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
  291. ),
  292. TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
  293. "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
  294. __entry->name,
  295. MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
  296. __entry->nr_pages,
  297. __entry->sync_mode,
  298. __entry->for_kupdate,
  299. __entry->range_cyclic,
  300. __entry->for_background,
  301. __print_symbolic(__entry->reason, WB_WORK_REASON),
  302. (unsigned long)__entry->cgroup_ino
  303. )
  304. );
  305. #define DEFINE_WRITEBACK_WORK_EVENT(name) \
  306. DEFINE_EVENT(writeback_work_class, name, \
  307. TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
  308. TP_ARGS(wb, work))
  309. DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
  310. DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
  311. DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
  312. DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
  313. DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
  314. TRACE_EVENT(writeback_pages_written,
  315. TP_PROTO(long pages_written),
  316. TP_ARGS(pages_written),
  317. TP_STRUCT__entry(
  318. __field(long, pages)
  319. ),
  320. TP_fast_assign(
  321. __entry->pages = pages_written;
  322. ),
  323. TP_printk("%ld", __entry->pages)
  324. );
  325. DECLARE_EVENT_CLASS(writeback_class,
  326. TP_PROTO(struct bdi_writeback *wb),
  327. TP_ARGS(wb),
  328. TP_STRUCT__entry(
  329. __array(char, name, 32)
  330. __field(ino_t, cgroup_ino)
  331. ),
  332. TP_fast_assign(
  333. strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
  334. __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
  335. ),
  336. TP_printk("bdi %s: cgroup_ino=%lu",
  337. __entry->name,
  338. (unsigned long)__entry->cgroup_ino
  339. )
  340. );
  341. #define DEFINE_WRITEBACK_EVENT(name) \
  342. DEFINE_EVENT(writeback_class, name, \
  343. TP_PROTO(struct bdi_writeback *wb), \
  344. TP_ARGS(wb))
  345. DEFINE_WRITEBACK_EVENT(writeback_wake_background);
  346. TRACE_EVENT(writeback_bdi_register,
  347. TP_PROTO(struct backing_dev_info *bdi),
  348. TP_ARGS(bdi),
  349. TP_STRUCT__entry(
  350. __array(char, name, 32)
  351. ),
  352. TP_fast_assign(
  353. strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
  354. ),
  355. TP_printk("bdi %s",
  356. __entry->name
  357. )
  358. );
  359. DECLARE_EVENT_CLASS(wbc_class,
  360. TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
  361. TP_ARGS(wbc, bdi),
  362. TP_STRUCT__entry(
  363. __array(char, name, 32)
  364. __field(long, nr_to_write)
  365. __field(long, pages_skipped)
  366. __field(int, sync_mode)
  367. __field(int, for_kupdate)
  368. __field(int, for_background)
  369. __field(int, for_reclaim)
  370. __field(int, range_cyclic)
  371. __field(long, range_start)
  372. __field(long, range_end)
  373. __field(ino_t, cgroup_ino)
  374. ),
  375. TP_fast_assign(
  376. strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
  377. __entry->nr_to_write = wbc->nr_to_write;
  378. __entry->pages_skipped = wbc->pages_skipped;
  379. __entry->sync_mode = wbc->sync_mode;
  380. __entry->for_kupdate = wbc->for_kupdate;
  381. __entry->for_background = wbc->for_background;
  382. __entry->for_reclaim = wbc->for_reclaim;
  383. __entry->range_cyclic = wbc->range_cyclic;
  384. __entry->range_start = (long)wbc->range_start;
  385. __entry->range_end = (long)wbc->range_end;
  386. __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
  387. ),
  388. TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
  389. "bgrd=%d reclm=%d cyclic=%d "
  390. "start=0x%lx end=0x%lx cgroup_ino=%lu",
  391. __entry->name,
  392. __entry->nr_to_write,
  393. __entry->pages_skipped,
  394. __entry->sync_mode,
  395. __entry->for_kupdate,
  396. __entry->for_background,
  397. __entry->for_reclaim,
  398. __entry->range_cyclic,
  399. __entry->range_start,
  400. __entry->range_end,
  401. (unsigned long)__entry->cgroup_ino
  402. )
  403. )
  404. #define DEFINE_WBC_EVENT(name) \
  405. DEFINE_EVENT(wbc_class, name, \
  406. TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
  407. TP_ARGS(wbc, bdi))
  408. DEFINE_WBC_EVENT(wbc_writepage);
  409. TRACE_EVENT(writeback_queue_io,
  410. TP_PROTO(struct bdi_writeback *wb,
  411. struct wb_writeback_work *work,
  412. unsigned long dirtied_before,
  413. int moved),
  414. TP_ARGS(wb, work, dirtied_before, moved),
  415. TP_STRUCT__entry(
  416. __array(char, name, 32)
  417. __field(unsigned long, older)
  418. __field(long, age)
  419. __field(int, moved)
  420. __field(int, reason)
  421. __field(ino_t, cgroup_ino)
  422. ),
  423. TP_fast_assign(
  424. strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
  425. __entry->older = dirtied_before;
  426. __entry->age = (jiffies - dirtied_before) * 1000 / HZ;
  427. __entry->moved = moved;
  428. __entry->reason = work->reason;
  429. __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
  430. ),
  431. TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
  432. __entry->name,
  433. __entry->older, /* dirtied_before in jiffies */
  434. __entry->age, /* dirtied_before in relative milliseconds */
  435. __entry->moved,
  436. __print_symbolic(__entry->reason, WB_WORK_REASON),
  437. (unsigned long)__entry->cgroup_ino
  438. )
  439. );
  440. TRACE_EVENT(global_dirty_state,
  441. TP_PROTO(unsigned long background_thresh,
  442. unsigned long dirty_thresh
  443. ),
  444. TP_ARGS(background_thresh,
  445. dirty_thresh
  446. ),
  447. TP_STRUCT__entry(
  448. __field(unsigned long, nr_dirty)
  449. __field(unsigned long, nr_writeback)
  450. __field(unsigned long, background_thresh)
  451. __field(unsigned long, dirty_thresh)
  452. __field(unsigned long, dirty_limit)
  453. __field(unsigned long, nr_dirtied)
  454. __field(unsigned long, nr_written)
  455. ),
  456. TP_fast_assign(
  457. __entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY);
  458. __entry->nr_writeback = global_node_page_state(NR_WRITEBACK);
  459. __entry->nr_dirtied = global_node_page_state(NR_DIRTIED);
  460. __entry->nr_written = global_node_page_state(NR_WRITTEN);
  461. __entry->background_thresh = background_thresh;
  462. __entry->dirty_thresh = dirty_thresh;
  463. __entry->dirty_limit = global_wb_domain.dirty_limit;
  464. ),
  465. TP_printk("dirty=%lu writeback=%lu "
  466. "bg_thresh=%lu thresh=%lu limit=%lu "
  467. "dirtied=%lu written=%lu",
  468. __entry->nr_dirty,
  469. __entry->nr_writeback,
  470. __entry->background_thresh,
  471. __entry->dirty_thresh,
  472. __entry->dirty_limit,
  473. __entry->nr_dirtied,
  474. __entry->nr_written
  475. )
  476. );
  477. #define KBps(x) ((x) << (PAGE_SHIFT - 10))
  478. TRACE_EVENT(bdi_dirty_ratelimit,
  479. TP_PROTO(struct bdi_writeback *wb,
  480. unsigned long dirty_rate,
  481. unsigned long task_ratelimit),
  482. TP_ARGS(wb, dirty_rate, task_ratelimit),
  483. TP_STRUCT__entry(
  484. __array(char, bdi, 32)
  485. __field(unsigned long, write_bw)
  486. __field(unsigned long, avg_write_bw)
  487. __field(unsigned long, dirty_rate)
  488. __field(unsigned long, dirty_ratelimit)
  489. __field(unsigned long, task_ratelimit)
  490. __field(unsigned long, balanced_dirty_ratelimit)
  491. __field(ino_t, cgroup_ino)
  492. ),
  493. TP_fast_assign(
  494. strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
  495. __entry->write_bw = KBps(wb->write_bandwidth);
  496. __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
  497. __entry->dirty_rate = KBps(dirty_rate);
  498. __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
  499. __entry->task_ratelimit = KBps(task_ratelimit);
  500. __entry->balanced_dirty_ratelimit =
  501. KBps(wb->balanced_dirty_ratelimit);
  502. __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
  503. ),
  504. TP_printk("bdi %s: "
  505. "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
  506. "dirty_ratelimit=%lu task_ratelimit=%lu "
  507. "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
  508. __entry->bdi,
  509. __entry->write_bw, /* write bandwidth */
  510. __entry->avg_write_bw, /* avg write bandwidth */
  511. __entry->dirty_rate, /* bdi dirty rate */
  512. __entry->dirty_ratelimit, /* base ratelimit */
  513. __entry->task_ratelimit, /* ratelimit with position control */
  514. __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
  515. (unsigned long)__entry->cgroup_ino
  516. )
  517. );
  518. TRACE_EVENT(balance_dirty_pages,
  519. TP_PROTO(struct bdi_writeback *wb,
  520. unsigned long thresh,
  521. unsigned long bg_thresh,
  522. unsigned long dirty,
  523. unsigned long bdi_thresh,
  524. unsigned long bdi_dirty,
  525. unsigned long dirty_ratelimit,
  526. unsigned long task_ratelimit,
  527. unsigned long dirtied,
  528. unsigned long period,
  529. long pause,
  530. unsigned long start_time),
  531. TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
  532. dirty_ratelimit, task_ratelimit,
  533. dirtied, period, pause, start_time),
  534. TP_STRUCT__entry(
  535. __array( char, bdi, 32)
  536. __field(unsigned long, limit)
  537. __field(unsigned long, setpoint)
  538. __field(unsigned long, dirty)
  539. __field(unsigned long, bdi_setpoint)
  540. __field(unsigned long, bdi_dirty)
  541. __field(unsigned long, dirty_ratelimit)
  542. __field(unsigned long, task_ratelimit)
  543. __field(unsigned int, dirtied)
  544. __field(unsigned int, dirtied_pause)
  545. __field(unsigned long, paused)
  546. __field( long, pause)
  547. __field(unsigned long, period)
  548. __field( long, think)
  549. __field(ino_t, cgroup_ino)
  550. ),
  551. TP_fast_assign(
  552. unsigned long freerun = (thresh + bg_thresh) / 2;
  553. strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
  554. __entry->limit = global_wb_domain.dirty_limit;
  555. __entry->setpoint = (global_wb_domain.dirty_limit +
  556. freerun) / 2;
  557. __entry->dirty = dirty;
  558. __entry->bdi_setpoint = __entry->setpoint *
  559. bdi_thresh / (thresh + 1);
  560. __entry->bdi_dirty = bdi_dirty;
  561. __entry->dirty_ratelimit = KBps(dirty_ratelimit);
  562. __entry->task_ratelimit = KBps(task_ratelimit);
  563. __entry->dirtied = dirtied;
  564. __entry->dirtied_pause = current->nr_dirtied_pause;
  565. __entry->think = current->dirty_paused_when == 0 ? 0 :
  566. (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
  567. __entry->period = period * 1000 / HZ;
  568. __entry->pause = pause * 1000 / HZ;
  569. __entry->paused = (jiffies - start_time) * 1000 / HZ;
  570. __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
  571. ),
  572. TP_printk("bdi %s: "
  573. "limit=%lu setpoint=%lu dirty=%lu "
  574. "bdi_setpoint=%lu bdi_dirty=%lu "
  575. "dirty_ratelimit=%lu task_ratelimit=%lu "
  576. "dirtied=%u dirtied_pause=%u "
  577. "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
  578. __entry->bdi,
  579. __entry->limit,
  580. __entry->setpoint,
  581. __entry->dirty,
  582. __entry->bdi_setpoint,
  583. __entry->bdi_dirty,
  584. __entry->dirty_ratelimit,
  585. __entry->task_ratelimit,
  586. __entry->dirtied,
  587. __entry->dirtied_pause,
  588. __entry->paused, /* ms */
  589. __entry->pause, /* ms */
  590. __entry->period, /* ms */
  591. __entry->think, /* ms */
  592. (unsigned long)__entry->cgroup_ino
  593. )
  594. );
  595. TRACE_EVENT(writeback_sb_inodes_requeue,
  596. TP_PROTO(struct inode *inode),
  597. TP_ARGS(inode),
  598. TP_STRUCT__entry(
  599. __array(char, name, 32)
  600. __field(ino_t, ino)
  601. __field(unsigned long, state)
  602. __field(unsigned long, dirtied_when)
  603. __field(ino_t, cgroup_ino)
  604. ),
  605. TP_fast_assign(
  606. strscpy_pad(__entry->name,
  607. bdi_dev_name(inode_to_bdi(inode)), 32);
  608. __entry->ino = inode->i_ino;
  609. __entry->state = inode->i_state;
  610. __entry->dirtied_when = inode->dirtied_when;
  611. __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
  612. ),
  613. TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
  614. __entry->name,
  615. (unsigned long)__entry->ino,
  616. show_inode_state(__entry->state),
  617. __entry->dirtied_when,
  618. (jiffies - __entry->dirtied_when) / HZ,
  619. (unsigned long)__entry->cgroup_ino
  620. )
  621. );
  622. DECLARE_EVENT_CLASS(writeback_single_inode_template,
  623. TP_PROTO(struct inode *inode,
  624. struct writeback_control *wbc,
  625. unsigned long nr_to_write
  626. ),
  627. TP_ARGS(inode, wbc, nr_to_write),
  628. TP_STRUCT__entry(
  629. __array(char, name, 32)
  630. __field(ino_t, ino)
  631. __field(unsigned long, state)
  632. __field(unsigned long, dirtied_when)
  633. __field(unsigned long, writeback_index)
  634. __field(long, nr_to_write)
  635. __field(unsigned long, wrote)
  636. __field(ino_t, cgroup_ino)
  637. ),
  638. TP_fast_assign(
  639. strscpy_pad(__entry->name,
  640. bdi_dev_name(inode_to_bdi(inode)), 32);
  641. __entry->ino = inode->i_ino;
  642. __entry->state = inode->i_state;
  643. __entry->dirtied_when = inode->dirtied_when;
  644. __entry->writeback_index = inode->i_mapping->writeback_index;
  645. __entry->nr_to_write = nr_to_write;
  646. __entry->wrote = nr_to_write - wbc->nr_to_write;
  647. __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
  648. ),
  649. TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
  650. "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
  651. __entry->name,
  652. (unsigned long)__entry->ino,
  653. show_inode_state(__entry->state),
  654. __entry->dirtied_when,
  655. (jiffies - __entry->dirtied_when) / HZ,
  656. __entry->writeback_index,
  657. __entry->nr_to_write,
  658. __entry->wrote,
  659. (unsigned long)__entry->cgroup_ino
  660. )
  661. );
  662. DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
  663. TP_PROTO(struct inode *inode,
  664. struct writeback_control *wbc,
  665. unsigned long nr_to_write),
  666. TP_ARGS(inode, wbc, nr_to_write)
  667. );
  668. DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
  669. TP_PROTO(struct inode *inode,
  670. struct writeback_control *wbc,
  671. unsigned long nr_to_write),
  672. TP_ARGS(inode, wbc, nr_to_write)
  673. );
  674. DECLARE_EVENT_CLASS(writeback_inode_template,
  675. TP_PROTO(struct inode *inode),
  676. TP_ARGS(inode),
  677. TP_STRUCT__entry(
  678. __field( dev_t, dev )
  679. __field( ino_t, ino )
  680. __field(unsigned long, state )
  681. __field( __u16, mode )
  682. __field(unsigned long, dirtied_when )
  683. ),
  684. TP_fast_assign(
  685. __entry->dev = inode->i_sb->s_dev;
  686. __entry->ino = inode->i_ino;
  687. __entry->state = inode->i_state;
  688. __entry->mode = inode->i_mode;
  689. __entry->dirtied_when = inode->dirtied_when;
  690. ),
  691. TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
  692. MAJOR(__entry->dev), MINOR(__entry->dev),
  693. (unsigned long)__entry->ino, __entry->dirtied_when,
  694. show_inode_state(__entry->state), __entry->mode)
  695. );
  696. DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
  697. TP_PROTO(struct inode *inode),
  698. TP_ARGS(inode)
  699. );
  700. DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
  701. TP_PROTO(struct inode *inode),
  702. TP_ARGS(inode)
  703. );
  704. DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,
  705. TP_PROTO(struct inode *inode),
  706. TP_ARGS(inode)
  707. );
  708. /*
  709. * Inode writeback list tracking.
  710. */
  711. DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
  712. TP_PROTO(struct inode *inode),
  713. TP_ARGS(inode)
  714. );
  715. DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
  716. TP_PROTO(struct inode *inode),
  717. TP_ARGS(inode)
  718. );
  719. #endif /* _TRACE_WRITEBACK_H */
  720. /* This part must be outside protection */
  721. #include <trace/define_trace.h>