trace_gfs2.h
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gfs2

#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GFS2_H

#include <linux/tracepoint.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/dlmconstants.h>
#include <linux/gfs2_ondisk.h>
#include <linux/writeback.h>
#include <linux/ktime.h>
#include <linux/iomap.h>
#include "incore.h"
#include "glock.h"
#include "rgrp.h"

#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
#define glock_trace_name(x) __print_symbolic(x, \
	dlm_state_name(IV), \
	dlm_state_name(NL), \
	dlm_state_name(CR), \
	dlm_state_name(CW), \
	dlm_state_name(PR), \
	dlm_state_name(PW), \
	dlm_state_name(EX))

#define block_state_name(x) __print_symbolic(x, \
	{ GFS2_BLKST_FREE, "free" }, \
	{ GFS2_BLKST_USED, "used" }, \
	{ GFS2_BLKST_DINODE, "dinode" }, \
	{ GFS2_BLKST_UNLINKED, "unlinked" })

#define TRACE_RS_DELETE 0
#define TRACE_RS_TREEDEL 1
#define TRACE_RS_INSERT 2
#define TRACE_RS_CLAIM 3

#define rs_func_name(x) __print_symbolic(x, \
	{ 0, "del " }, \
	{ 1, "tdel" }, \
	{ 2, "ins " }, \
	{ 3, "clm " })

#define show_glock_flags(flags) __print_flags(flags, "", \
	{(1UL << GLF_LOCK), "l" }, \
	{(1UL << GLF_DEMOTE), "D" }, \
	{(1UL << GLF_PENDING_DEMOTE), "d" }, \
	{(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \
	{(1UL << GLF_DIRTY), "y" }, \
	{(1UL << GLF_LFLUSH), "f" }, \
	{(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
	{(1UL << GLF_REPLY_PENDING), "r" }, \
	{(1UL << GLF_INITIAL), "I" }, \
	{(1UL << GLF_FROZEN), "F" }, \
	{(1UL << GLF_LRU), "L" }, \
	{(1UL << GLF_OBJECT), "o" }, \
	{(1UL << GLF_BLOCKING), "b" })

#ifndef NUMPTY
#define NUMPTY
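/* Map a GFS2 lock state (LM_ST_*) to the DLM lock mode used in trace output. */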
static inline u8 glock_trace_state(unsigned int state)
{
	switch(state) {
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	}
	return DLM_LOCK_NL;
}
#endif

/* Section 1 - Locking
 *
 * Objectives:
 * Latency: Remote demote request to state change
 * Latency: Local lock request to state change
 * Latency: State change to lock grant
 * Correctness: Ordering of local lock state vs. I/O requests
 * Correctness: Responses to remote demote requests
 */
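/*
 * Example: once tracing is enabled in the kernel, these tracepoints appear
 * under tracefs (paths assume the common /sys/kernel/tracing mount point):
 *
 *   echo 1 > /sys/kernel/tracing/events/gfs2/gfs2_glock_state_change/enable
 *   echo 1 > /sys/kernel/tracing/events/gfs2/gfs2_demote_rq/enable
 *   cat /sys/kernel/tracing/trace_pipe
 */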
/* General glock state change (DLM lock request completes) */
TRACE_EVENT(gfs2_glock_state_change,
	TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
	TP_ARGS(gl, new_state),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( u8, new_state )
		__field( u8, dmt_state )
		__field( u8, tgt_state )
		__field( unsigned long, flags )
	),
	TP_fast_assign(
		__entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->new_state = glock_trace_state(new_state);
		__entry->tgt_state = glock_trace_state(gl->gl_target);
		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),
	TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->new_state),
		  glock_trace_name(__entry->tgt_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags))
);

/* State change -> unlocked, glock is being deallocated */
TRACE_EVENT(gfs2_glock_put,
	TP_PROTO(const struct gfs2_glock *gl),
	TP_ARGS(gl),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( unsigned long, flags )
	),
	TP_fast_assign(
		__entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
	),
	TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->gltype, (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(DLM_LOCK_IV),
		  show_glock_flags(__entry->flags))
);

/* Callback (local or remote) requesting lock demotion */
TRACE_EVENT(gfs2_demote_rq,
	TP_PROTO(const struct gfs2_glock *gl, bool remote),
	TP_ARGS(gl, remote),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, cur_state )
		__field( u8, dmt_state )
		__field( unsigned long, flags )
		__field( bool, remote )
	),
	TP_fast_assign(
		__entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->cur_state = glock_trace_state(gl->gl_state);
		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
		__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
		__entry->remote = remote;
	),
	TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->cur_state),
		  glock_trace_name(__entry->dmt_state),
		  show_glock_flags(__entry->flags),
		  __entry->remote ? "remote" : "local")
);

/* Promotion/grant of a glock */
TRACE_EVENT(gfs2_promote,
	TP_PROTO(const struct gfs2_holder *gh),
	TP_ARGS(gh),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( u8, state )
	),
	TP_fast_assign(
		__entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->glnum = gh->gh_gl->gl_name.ln_number;
		__entry->gltype = gh->gh_gl->gl_name.ln_type;
		__entry->state = glock_trace_state(gh->gh_state);
	),
	TP_printk("%u,%u glock %u:%llu promote %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  glock_trace_name(__entry->state))
);

/* Queue/dequeue a lock request */
TRACE_EVENT(gfs2_glock_queue,
	TP_PROTO(const struct gfs2_holder *gh, int queue),
	TP_ARGS(gh, queue),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( int, queue )
		__field( u8, state )
	),
	TP_fast_assign(
		__entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->glnum = gh->gh_gl->gl_name.ln_number;
		__entry->gltype = gh->gh_gl->gl_name.ln_type;
		__entry->queue = queue;
		__entry->state = glock_trace_state(gh->gh_state);
	),
	TP_printk("%u,%u glock %u:%llu %squeue %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->queue ? "" : "de",
		  glock_trace_name(__entry->state))
);

/* DLM sends a reply to GFS2 */
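/*
 * The values sampled below come from gl->gl_stats.stats[], indexed by the
 * GFS2_LKS_* constants in incore.h: roughly, smoothed round trip time and
 * variance for non-blocking (srtt/srttvar) and blocking (srttb/srttvarb) DLM
 * requests, smoothed inter-request time and variance (sirt/sirtvar), and the
 * DLM request and holder queue counts (dcount/qcount).
 */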
TRACE_EVENT(gfs2_glock_lock_time,
	TP_PROTO(const struct gfs2_glock *gl, s64 tdiff),
	TP_ARGS(gl, tdiff),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, glnum )
		__field( u32, gltype )
		__field( int, status )
		__field( char, flags )
		__field( s64, tdiff )
		__field( u64, srtt )
		__field( u64, srttvar )
		__field( u64, srttb )
		__field( u64, srttvarb )
		__field( u64, sirt )
		__field( u64, sirtvar )
		__field( u64, dcount )
		__field( u64, qcount )
	),
	TP_fast_assign(
		__entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->glnum = gl->gl_name.ln_number;
		__entry->gltype = gl->gl_name.ln_type;
		__entry->status = gl->gl_lksb.sb_status;
		__entry->flags = gl->gl_lksb.sb_flags;
		__entry->tdiff = tdiff;
		__entry->srtt = gl->gl_stats.stats[GFS2_LKS_SRTT];
		__entry->srttvar = gl->gl_stats.stats[GFS2_LKS_SRTTVAR];
		__entry->srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
		__entry->srttvarb = gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
		__entry->sirt = gl->gl_stats.stats[GFS2_LKS_SIRT];
		__entry->sirtvar = gl->gl_stats.stats[GFS2_LKS_SIRTVAR];
		__entry->dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
		__entry->qcount = gl->gl_stats.stats[GFS2_LKS_QCOUNT];
	),
	TP_printk("%u,%u glock %d:%lld status:%d flags:%02x tdiff:%lld srtt:%lld/%lld srttb:%lld/%lld sirt:%lld/%lld dcnt:%lld qcnt:%lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
		  (unsigned long long)__entry->glnum,
		  __entry->status, __entry->flags,
		  (long long)__entry->tdiff,
		  (long long)__entry->srtt,
		  (long long)__entry->srttvar,
		  (long long)__entry->srttb,
		  (long long)__entry->srttvarb,
		  (long long)__entry->sirt,
		  (long long)__entry->sirtvar,
		  (long long)__entry->dcount,
		  (long long)__entry->qcount)
);

/* Section 2 - Log/journal
 *
 * Objectives:
 * Latency: Log flush time
 * Correctness: pin/unpin vs. disk I/O ordering
 * Performance: Log usage stats
 */
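/*
 * Example: log flush latency can be derived from the start/end pairs emitted
 * by gfs2_log_flush, e.g. with trace-cmd (event names as defined below):
 *
 *   trace-cmd record -e gfs2:gfs2_log_flush -e gfs2:gfs2_pin
 *   trace-cmd report
 */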
/* Pin/unpin a block in the log */
TRACE_EVENT(gfs2_pin,
	TP_PROTO(const struct gfs2_bufdata *bd, int pin),
	TP_ARGS(bd, pin),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, pin )
		__field( u32, len )
		__field( sector_t, block )
		__field( u64, ino )
	),
	TP_fast_assign(
		__entry->dev = bd->bd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->pin = pin;
		__entry->len = bd->bd_bh->b_size;
		__entry->block = bd->bd_bh->b_blocknr;
		__entry->ino = bd->bd_gl->gl_name.ln_number;
	),
	TP_printk("%u,%u log %s %llu/%lu inode %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->pin ? "pin" : "unpin",
		  (unsigned long long)__entry->block,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->ino)
);

/* Flushing the log */
TRACE_EVENT(gfs2_log_flush,
	TP_PROTO(const struct gfs2_sbd *sdp, int start, u32 flags),
	TP_ARGS(sdp, start, flags),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, start )
		__field( u64, log_seq )
		__field( u32, flags )
	),
	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->start = start;
		__entry->log_seq = sdp->sd_log_sequence;
		__entry->flags = flags;
	),
	TP_printk("%u,%u log flush %s %llu %llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->start ? "start" : "end",
		  (unsigned long long)__entry->log_seq,
		  (unsigned long long)__entry->flags)
);

/* Reserving/releasing blocks in the log */
TRACE_EVENT(gfs2_log_blocks,
	TP_PROTO(const struct gfs2_sbd *sdp, int blocks),
	TP_ARGS(sdp, blocks),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, blocks )
		__field( int, blks_free )
	),
	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->blocks = blocks;
		__entry->blks_free = atomic_read(&sdp->sd_log_blks_free);
	),
	TP_printk("%u,%u log reserve %d %d", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->blocks, __entry->blks_free)
);

/* Writing back the AIL */
TRACE_EVENT(gfs2_ail_flush,
	TP_PROTO(const struct gfs2_sbd *sdp, const struct writeback_control *wbc, int start),
	TP_ARGS(sdp, wbc, start),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( int, start )
		__field( int, sync_mode )
		__field( long, nr_to_write )
	),
	TP_fast_assign(
		__entry->dev = sdp->sd_vfs->s_dev;
		__entry->start = start;
		__entry->sync_mode = wbc->sync_mode;
		__entry->nr_to_write = wbc->nr_to_write;
	),
	TP_printk("%u,%u ail flush %s %s %ld", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->start ? "start" : "end",
		  __entry->sync_mode == WB_SYNC_ALL ? "all" : "none",
		  __entry->nr_to_write)
);
/* Section 3 - bmap
 *
 * Objectives:
 * Latency: Bmap request time
 * Performance: Block allocator tracing
 * Correctness: Test of discard generation vs. blocks allocated
 */
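/*
 * Each TRACE_EVENT() below generates a trace_<name>() call, e.g.
 * trace_gfs2_bmap() or trace_gfs2_block_alloc(), invoked from the bmap and
 * allocation paths.  Events can also be filtered on their fields via tracefs,
 * e.g. (the inode number is illustrative):
 *
 *   echo 'inum == 1234' > /sys/kernel/tracing/events/gfs2/gfs2_bmap/filter
 */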
/* Map an extent of blocks, possibly a new allocation */
TRACE_EVENT(gfs2_bmap,
	TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh,
		 sector_t lblock, int create, int errno),
	TP_ARGS(ip, bh, lblock, create, errno),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, lblock )
		__field( sector_t, pblock )
		__field( u64, inum )
		__field( unsigned long, state )
		__field( u32, len )
		__field( int, create )
		__field( int, errno )
	),
	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->lblock = lblock;
		__entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
		__entry->inum = ip->i_no_addr;
		__entry->state = bh->b_state;
		__entry->len = bh->b_size;
		__entry->create = create;
		__entry->errno = errno;
	),
	TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->lblock,
		  (unsigned long)__entry->len,
		  (unsigned long long)__entry->pblock,
		  __entry->state, __entry->create ? "create " : "nocreate",
		  __entry->errno)
);

TRACE_EVENT(gfs2_iomap_start,
	TP_PROTO(const struct gfs2_inode *ip, loff_t pos, ssize_t length,
		 u16 flags),
	TP_ARGS(ip, pos, length, flags),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, inum )
		__field( loff_t, pos )
		__field( ssize_t, length )
		__field( u16, flags )
	),
	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->inum = ip->i_no_addr;
		__entry->pos = pos;
		__entry->length = length;
		__entry->flags = flags;
	),
	TP_printk("%u,%u bmap %llu iomap start %llu/%lu flags:%08x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->pos,
		  (unsigned long)__entry->length, (u16)__entry->flags)
);

TRACE_EVENT(gfs2_iomap_end,
	TP_PROTO(const struct gfs2_inode *ip, struct iomap *iomap, int ret),
	TP_ARGS(ip, iomap, ret),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, inum )
		__field( loff_t, offset )
		__field( ssize_t, length )
		__field( sector_t, pblock )
		__field( u16, flags )
		__field( u16, type )
		__field( int, ret )
	),
	TP_fast_assign(
		__entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->inum = ip->i_no_addr;
		__entry->offset = iomap->offset;
		__entry->length = iomap->length;
		__entry->pblock = iomap->addr == IOMAP_NULL_ADDR ? 0 :
				  (iomap->addr >> ip->i_inode.i_blkbits);
		__entry->flags = iomap->flags;
		__entry->type = iomap->type;
		__entry->ret = ret;
	),
	TP_printk("%u,%u bmap %llu iomap end %llu/%lu to %llu ty:%d flags:%08x rc:%d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->offset,
		  (unsigned long)__entry->length,
		  (long long)__entry->pblock,
		  (u16)__entry->type,
		  (u16)__entry->flags, __entry->ret)
);

/* Keep track of blocks as they are allocated/freed */
TRACE_EVENT(gfs2_block_alloc,
	TP_PROTO(const struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
		 u64 block, unsigned len, u8 block_state),
	TP_ARGS(ip, rgd, block, len, block_state),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, start )
		__field( u64, inum )
		__field( u32, len )
		__field( u8, block_state )
		__field( u64, rd_addr )
		__field( u32, rd_free_clone )
		__field( u32, rd_requested )
		__field( u32, rd_reserved )
	),
	TP_fast_assign(
		__entry->dev = rgd->rd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
		__entry->start = block;
		__entry->inum = ip->i_no_addr;
		__entry->len = len;
		__entry->block_state = block_state;
		__entry->rd_addr = rgd->rd_addr;
		__entry->rd_free_clone = rgd->rd_free_clone;
		__entry->rd_requested = rgd->rd_requested;
		__entry->rd_reserved = rgd->rd_reserved;
	),
	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rq:%u rr:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->start,
		  (unsigned long)__entry->len,
		  block_state_name(__entry->block_state),
		  (unsigned long long)__entry->rd_addr,
		  __entry->rd_free_clone,
		  __entry->rd_requested,
		  __entry->rd_reserved)
);

/* Keep track of multi-block reservations as they are allocated/freed */
TRACE_EVENT(gfs2_rs,
	TP_PROTO(const struct gfs2_blkreserv *rs, u8 func),
	TP_ARGS(rs, func),
	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( u64, rd_addr )
		__field( u32, rd_free_clone )
		__field( u32, rd_requested )
		__field( u32, rd_reserved )
		__field( u64, inum )
		__field( u64, start )
		__field( u32, requested )
		__field( u32, reserved )
		__field( u8, func )
	),
	TP_fast_assign(
		__entry->dev = rs->rs_rgd->rd_sbd->sd_vfs->s_dev;
		__entry->rd_addr = rs->rs_rgd->rd_addr;
		__entry->rd_free_clone = rs->rs_rgd->rd_free_clone;
		__entry->rd_requested = rs->rs_rgd->rd_requested;
		__entry->rd_reserved = rs->rs_rgd->rd_reserved;
		__entry->inum = container_of(rs, struct gfs2_inode,
					     i_res)->i_no_addr;
		__entry->start = rs->rs_start;
		__entry->requested = rs->rs_requested;
		__entry->reserved = rs->rs_reserved;
		__entry->func = func;
	),
	TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%u rq:%u rr:%u %s q:%u r:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->inum,
		  (unsigned long long)__entry->start,
		  (unsigned long long)__entry->rd_addr,
		  __entry->rd_free_clone,
		  __entry->rd_requested,
		  __entry->rd_reserved,
		  rs_func_name(__entry->func),
		  __entry->requested,
		  __entry->reserved)
);

#endif /* _TRACE_GFS2_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_gfs2
#include <trace/define_trace.h>