// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2009, Christoph Hellwig
 * All Rights Reserved.
 *
 * NOTE: none of these tracepoints shall be considered a stable kernel ABI
 * as they can change at any time.
 *
 * Current conventions for printing numbers measuring specific units:
 *
 * agno: allocation group number
 *
 * agino: per-AG inode number
 * ino: filesystem inode number
 *
 * agbno: per-AG block number in fs blocks
 * startblock: physical block number for file mappings.  This is either a
 *	segmented fsblock for data device mappings, or a rfsblock
 *	for realtime device mappings
 * fsbcount: number of blocks in an extent, in fs blocks
 *
 * daddr: physical block number in 512b blocks
 * bbcount: number of blocks in a physical extent, in 512b blocks
 *
 * owner: reverse-mapping owner, usually inodes
 *
 * fileoff: file offset, in fs blocks
 * pos: file offset, in bytes
 * bytecount: number of bytes
 *
 * disize: ondisk file size, in bytes
 * isize: incore file size, in bytes
 *
 * forkoff: inode fork offset, in bytes
 *
 * ireccount: number of inode records
 *
 * Numbers describing space allocations (blocks, extents, inodes) should be
 * formatted in hexadecimal.
 */
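/*
 * Editor's illustrative sketch, not part of the original header: under the
 * conventions above, block-granular quantities (startblock, fsbcount, agbno)
 * are printed in hexadecimal while device numbers stay decimal.  A format
 * string for a hypothetical extent event (field names chosen here for
 * illustration only) would therefore look like:
 *
 *	TP_printk("dev %d:%d ino 0x%llx fileoff 0x%llx startblock 0x%llx "
 *		  "fsbcount 0x%llx",
 *		  MAJOR(__entry->dev), MINOR(__entry->dev),
 *		  __entry->ino, __entry->fileoff, __entry->startblock,
 *		  __entry->fsbcount)
 *
 * which renders in the trace buffer along the lines of:
 *	dev 253:0 ino 0x85 fileoff 0x0 startblock 0x18 fsbcount 0x1
 */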
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xfs
#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XFS_H
#include <linux/tracepoint.h>
struct xfs_agf;
struct xfs_alloc_arg;
struct xfs_attr_list_context;
struct xfs_buf_log_item;
struct xfs_da_args;
struct xfs_da_node_entry;
struct xfs_dquot;
struct xfs_log_item;
struct xlog;
struct xlog_ticket;
struct xlog_recover;
struct xlog_recover_item;
struct xlog_rec_header;
struct xlog_in_core;
struct xfs_buf_log_format;
struct xfs_inode_log_format;
struct xfs_bmbt_irec;
struct xfs_btree_cur;
struct xfs_refcount_irec;
struct xfs_fsmap;
struct xfs_rmap_irec;
struct xfs_icreate_log;
struct xfs_owner_info;
struct xfs_trans_res;
struct xfs_inobt_rec_incore;
union xfs_btree_ptr;
struct xfs_dqtrx;
struct xfs_icwalk;
#define XFS_ATTR_FILTER_FLAGS \
	{ XFS_ATTR_ROOT,	"ROOT" }, \
	{ XFS_ATTR_SECURE,	"SECURE" }, \
	{ XFS_ATTR_INCOMPLETE,	"INCOMPLETE" }
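/*
 * Editor's note, not in the original source: XFS_ATTR_FILTER_FLAGS is the
 * flag-name table handed to __print_flags() by the attr_list events below,
 * e.g.:
 *
 *	__print_flags(__entry->attr_filter, "|", XFS_ATTR_FILTER_FLAGS)
 *
 * so a filter value of (XFS_ATTR_ROOT | XFS_ATTR_SECURE) is decoded in the
 * trace output as "ROOT|SECURE".
 */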
DECLARE_EVENT_CLASS(xfs_attr_list_class,
	TP_PROTO(struct xfs_attr_list_context *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(void *, buffer)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, dupcnt)
		__field(unsigned int, attr_filter)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor.hashval;
		__entry->blkno = ctx->cursor.blkno;
		__entry->offset = ctx->cursor.offset;
		__entry->buffer = ctx->buffer;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->attr_filter = ctx->attr_filter;
	),
	TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
		  "buffer %p size %u count %u firstu %u filter %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->hashval,
		  __entry->blkno,
		  __entry->offset,
		  __entry->dupcnt,
		  __entry->buffer,
		  __entry->bufsize,
		  __entry->count,
		  __entry->firstu,
		  __print_flags(__entry->attr_filter, "|",
				XFS_ATTR_FILTER_FLAGS)
	)
)
#define DEFINE_ATTR_LIST_EVENT(name) \
DEFINE_EVENT(xfs_attr_list_class, name, \
	TP_PROTO(struct xfs_attr_list_context *ctx), \
	TP_ARGS(ctx))
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
DEFINE_ATTR_LIST_EVENT(xfs_attr_leaf_list);
DEFINE_ATTR_LIST_EVENT(xfs_attr_node_list);
TRACE_EVENT(xlog_intent_recovery_failed,
	TP_PROTO(struct xfs_mount *mp, int error, void *function),
	TP_ARGS(mp, error, function),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, error)
		__field(void *, function)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->error = error;
		__entry->function = function;
	),
	TP_printk("dev %d:%d error %d function %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->error, __entry->function)
);
DECLARE_EVENT_CLASS(xfs_perag_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount,
		 unsigned long caller_ip),
	TP_ARGS(mp, agno, refcount, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(int, refcount)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->refcount = refcount;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d agno 0x%x refcount %d caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->refcount,
		  (char *)__entry->caller_ip)
);
#define DEFINE_PERAG_REF_EVENT(name) \
DEFINE_EVENT(xfs_perag_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
		 unsigned long caller_ip), \
	TP_ARGS(mp, agno, refcount, caller_ip))
DEFINE_PERAG_REF_EVENT(xfs_perag_get);
DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_put);
DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
TRACE_EVENT(xfs_inodegc_worker,
	TP_PROTO(struct xfs_mount *mp, unsigned int shrinker_hits),
	TP_ARGS(mp, shrinker_hits),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, shrinker_hits)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->shrinker_hits = shrinker_hits;
	),
	TP_printk("dev %d:%d shrinker_hits %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->shrinker_hits)
);
DECLARE_EVENT_CLASS(xfs_fs_class,
	TP_PROTO(struct xfs_mount *mp, void *caller_ip),
	TP_ARGS(mp, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long long, mflags)
		__field(unsigned long, opstate)
		__field(unsigned long, sbflags)
		__field(void *, caller_ip)
	),
	TP_fast_assign(
		if (mp) {
			__entry->dev = mp->m_super->s_dev;
			__entry->mflags = mp->m_features;
			__entry->opstate = mp->m_opstate;
			__entry->sbflags = mp->m_super->s_flags;
		}
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d m_features 0x%llx opstate (%s) s_flags 0x%lx caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->mflags,
		  __print_flags(__entry->opstate, "|", XFS_OPSTATE_STRINGS),
		  __entry->sbflags,
		  __entry->caller_ip)
);
#define DEFINE_FS_EVENT(name) \
DEFINE_EVENT(xfs_fs_class, name, \
	TP_PROTO(struct xfs_mount *mp, void *caller_ip), \
	TP_ARGS(mp, caller_ip))
DEFINE_FS_EVENT(xfs_inodegc_flush);
DEFINE_FS_EVENT(xfs_inodegc_push);
DEFINE_FS_EVENT(xfs_inodegc_start);
DEFINE_FS_EVENT(xfs_inodegc_stop);
DEFINE_FS_EVENT(xfs_inodegc_queue);
DEFINE_FS_EVENT(xfs_inodegc_throttle);
DEFINE_FS_EVENT(xfs_fs_sync_fs);
DEFINE_FS_EVENT(xfs_blockgc_start);
DEFINE_FS_EVENT(xfs_blockgc_stop);
DEFINE_FS_EVENT(xfs_blockgc_worker);
DEFINE_FS_EVENT(xfs_blockgc_flush_all);
TRACE_EVENT(xfs_inodegc_shrinker_scan,
	TP_PROTO(struct xfs_mount *mp, struct shrink_control *sc,
		 void *caller_ip),
	TP_ARGS(mp, sc, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, nr_to_scan)
		__field(void *, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->nr_to_scan = sc->nr_to_scan;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d nr_to_scan %lu caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_to_scan,
		  __entry->caller_ip)
);
DECLARE_EVENT_CLASS(xfs_ag_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),
	TP_ARGS(mp, agno),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
	),
	TP_printk("dev %d:%d agno 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno)
);
#define DEFINE_AG_EVENT(name) \
DEFINE_EVENT(xfs_ag_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno), \
	TP_ARGS(mp, agno))
DEFINE_AG_EVENT(xfs_read_agf);
DEFINE_AG_EVENT(xfs_alloc_read_agf);
DEFINE_AG_EVENT(xfs_read_agi);
DEFINE_AG_EVENT(xfs_ialloc_read_agi);
TRACE_EVENT(xfs_attr_list_node_descend,
	TP_PROTO(struct xfs_attr_list_context *ctx,
		 struct xfs_da_node_entry *btree),
	TP_ARGS(ctx, btree),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(void *, buffer)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, dupcnt)
		__field(unsigned int, attr_filter)
		__field(u32, bt_hashval)
		__field(u32, bt_before)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor.hashval;
		__entry->blkno = ctx->cursor.blkno;
		__entry->offset = ctx->cursor.offset;
		__entry->buffer = ctx->buffer;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->attr_filter = ctx->attr_filter;
		__entry->bt_hashval = be32_to_cpu(btree->hashval);
		__entry->bt_before = be32_to_cpu(btree->before);
	),
	TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
		  "buffer %p size %u count %u firstu %u filter %s "
		  "node hashval %u, node before %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->hashval,
		  __entry->blkno,
		  __entry->offset,
		  __entry->dupcnt,
		  __entry->buffer,
		  __entry->bufsize,
		  __entry->count,
		  __entry->firstu,
		  __print_flags(__entry->attr_filter, "|",
				XFS_ATTR_FILTER_FLAGS),
		  __entry->bt_hashval,
		  __entry->bt_before)
);
DECLARE_EVENT_CLASS(xfs_bmap_class,
	TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state,
		 unsigned long caller_ip),
	TP_ARGS(ip, cur, state, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(void *, leaf)
		__field(int, pos)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_exntst_t, state)
		__field(int, bmap_state)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		struct xfs_ifork	*ifp;
		struct xfs_bmbt_irec	r;
		ifp = xfs_iext_state_to_fork(ip, state);
		xfs_iext_get_extent(ifp, cur, &r);
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->leaf = cur->leaf;
		__entry->pos = cur->pos;
		__entry->startoff = r.br_startoff;
		__entry->startblock = r.br_startblock;
		__entry->blockcount = r.br_blockcount;
		__entry->state = r.br_state;
		__entry->bmap_state = state;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx state %s cur %p/%d "
		  "fileoff 0x%llx startblock 0x%llx fsbcount 0x%llx flag %d caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
		  __entry->leaf,
		  __entry->pos,
		  __entry->startoff,
		  (int64_t)__entry->startblock,
		  __entry->blockcount,
		  __entry->state,
		  (char *)__entry->caller_ip)
)
#define DEFINE_BMAP_EVENT(name) \
DEFINE_EVENT(xfs_bmap_class, name, \
	TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state, \
		 unsigned long caller_ip), \
	TP_ARGS(ip, cur, state, caller_ip))
DEFINE_BMAP_EVENT(xfs_iext_insert);
DEFINE_BMAP_EVENT(xfs_iext_remove);
DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
DEFINE_BMAP_EVENT(xfs_bmap_post_update);
DEFINE_BMAP_EVENT(xfs_read_extent);
DEFINE_BMAP_EVENT(xfs_write_extent);
DECLARE_EVENT_CLASS(xfs_buf_class,
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
	TP_ARGS(bp, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(int, nblks)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(unsigned, flags)
		__field(unsigned long, caller_ip)
		__field(const void *, buf_ops)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = xfs_buf_daddr(bp);
		__entry->nblks = bp->b_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
		__entry->buf_ops = bp->b_ops;
	),
	TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
		  "lock %d flags %s bufops %pS caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->nblks,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  __entry->buf_ops,
		  (void *)__entry->caller_ip)
)
#define DEFINE_BUF_EVENT(name) \
DEFINE_EVENT(xfs_buf_class, name, \
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
	TP_ARGS(bp, caller_ip))
DEFINE_BUF_EVENT(xfs_buf_init);
DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_submit);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
DEFINE_BUF_EVENT(xfs_buf_trylock_fail);
DEFINE_BUF_EVENT(xfs_buf_trylock);
DEFINE_BUF_EVENT(xfs_buf_unlock);
DEFINE_BUF_EVENT(xfs_buf_iowait);
DEFINE_BUF_EVENT(xfs_buf_iowait_done);
DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
DEFINE_BUF_EVENT(xfs_buf_delwri_queued);
DEFINE_BUF_EVENT(xfs_buf_delwri_split);
DEFINE_BUF_EVENT(xfs_buf_delwri_pushbuf);
DEFINE_BUF_EVENT(xfs_buf_get_uncached);
DEFINE_BUF_EVENT(xfs_buf_item_relse);
DEFINE_BUF_EVENT(xfs_buf_iodone_async);
DEFINE_BUF_EVENT(xfs_buf_error_relse);
DEFINE_BUF_EVENT(xfs_buf_drain_buftarg);
DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
/* not really buffer traces, but the buf provides useful information */
DEFINE_BUF_EVENT(xfs_btree_corrupt);
DEFINE_BUF_EVENT(xfs_reset_dqcounts);
/* pass flags explicitly */
DECLARE_EVENT_CLASS(xfs_buf_flags_class,
	TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
	TP_ARGS(bp, flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(unsigned int, length)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(unsigned, flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = xfs_buf_daddr(bp);
		__entry->length = bp->b_length;
		__entry->flags = flags;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
		  "lock %d flags %s caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->length,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  (void *)__entry->caller_ip)
)
#define DEFINE_BUF_FLAGS_EVENT(name) \
DEFINE_EVENT(xfs_buf_flags_class, name, \
	TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
	TP_ARGS(bp, flags, caller_ip))
DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
TRACE_EVENT(xfs_buf_ioerror,
	TP_PROTO(struct xfs_buf *bp, int error, xfs_failaddr_t caller_ip),
	TP_ARGS(bp, error, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(unsigned int, length)
		__field(unsigned, flags)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(int, error)
		__field(xfs_failaddr_t, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = xfs_buf_daddr(bp);
		__entry->length = bp->b_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->error = error;
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
		  "lock %d error %d flags %s caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->length,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __entry->error,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  (void *)__entry->caller_ip)
);
DECLARE_EVENT_CLASS(xfs_buf_item_class,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, buf_bno)
		__field(unsigned int, buf_len)
		__field(int, buf_hold)
		__field(int, buf_pincount)
		__field(int, buf_lockval)
		__field(unsigned, buf_flags)
		__field(unsigned, bli_recur)
		__field(int, bli_refcount)
		__field(unsigned, bli_flags)
		__field(unsigned long, li_flags)
	),
	TP_fast_assign(
		__entry->dev = bip->bli_buf->b_target->bt_dev;
		__entry->bli_flags = bip->bli_flags;
		__entry->bli_recur = bip->bli_recur;
		__entry->bli_refcount = atomic_read(&bip->bli_refcount);
		__entry->buf_bno = xfs_buf_daddr(bip->bli_buf);
		__entry->buf_len = bip->bli_buf->b_length;
		__entry->buf_flags = bip->bli_buf->b_flags;
		__entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
		__entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
		__entry->buf_lockval = bip->bli_buf->b_sema.count;
		__entry->li_flags = bip->bli_item.li_flags;
	),
	TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
		  "lock %d flags %s recur %d refcount %d bliflags %s "
		  "liflags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->buf_bno,
		  __entry->buf_len,
		  __entry->buf_hold,
		  __entry->buf_pincount,
		  __entry->buf_lockval,
		  __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS),
		  __entry->bli_recur,
		  __entry->bli_refcount,
		  __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
		  __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS))
)
#define DEFINE_BUF_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_buf_item_class, name, \
	TP_PROTO(struct xfs_buf_log_item *bip), \
	TP_ARGS(bip))
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_ordered);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_ordered);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_release);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
DECLARE_EVENT_CLASS(xfs_filestream_class,
	TP_PROTO(struct xfs_mount *mp, xfs_ino_t ino, xfs_agnumber_t agno),
	TP_ARGS(mp, ino, agno),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_agnumber_t, agno)
		__field(int, streams)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->ino = ino;
		__entry->agno = agno;
		__entry->streams = xfs_filestream_peek_ag(mp, agno);
	),
	TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->agno,
		  __entry->streams)
)
#define DEFINE_FILESTREAM_EVENT(name) \
DEFINE_EVENT(xfs_filestream_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_ino_t ino, xfs_agnumber_t agno), \
	TP_ARGS(mp, ino, agno))
DEFINE_FILESTREAM_EVENT(xfs_filestream_free);
DEFINE_FILESTREAM_EVENT(xfs_filestream_lookup);
DEFINE_FILESTREAM_EVENT(xfs_filestream_scan);
TRACE_EVENT(xfs_filestream_pick,
	TP_PROTO(struct xfs_inode *ip, xfs_agnumber_t agno,
		 xfs_extlen_t free, int nscan),
	TP_ARGS(ip, agno, free, nscan),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_agnumber_t, agno)
		__field(int, streams)
		__field(xfs_extlen_t, free)
		__field(int, nscan)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->agno = agno;
		__entry->streams = xfs_filestream_peek_ag(ip->i_mount, agno);
		__entry->free = free;
		__entry->nscan = nscan;
	),
	TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d free %d nscan %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->agno,
		  __entry->streams,
		  __entry->free,
		  __entry->nscan)
);
DECLARE_EVENT_CLASS(xfs_lock_class,
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
		 unsigned long caller_ip),
	TP_ARGS(ip, lock_flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, lock_flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->lock_flags = lock_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx flags %s caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
		  (void *)__entry->caller_ip)
)
#define DEFINE_LOCK_EVENT(name) \
DEFINE_EVENT(xfs_lock_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
		 unsigned long caller_ip), \
	TP_ARGS(ip, lock_flags, caller_ip))
DEFINE_LOCK_EVENT(xfs_ilock);
DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);
DECLARE_EVENT_CLASS(xfs_inode_class,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned long, iflags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->iflags = ip->i_flags;
	),
	TP_printk("dev %d:%d ino 0x%llx iflags 0x%lx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->iflags)
)
#define DEFINE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_inode_class, name, \
	TP_PROTO(struct xfs_inode *ip), \
	TP_ARGS(ip))
DEFINE_INODE_EVENT(xfs_iget_skip);
DEFINE_INODE_EVENT(xfs_iget_recycle);
DEFINE_INODE_EVENT(xfs_iget_recycle_fail);
DEFINE_INODE_EVENT(xfs_iget_hit);
DEFINE_INODE_EVENT(xfs_iget_miss);
DEFINE_INODE_EVENT(xfs_getattr);
DEFINE_INODE_EVENT(xfs_setattr);
DEFINE_INODE_EVENT(xfs_readlink);
DEFINE_INODE_EVENT(xfs_inactive_symlink);
DEFINE_INODE_EVENT(xfs_alloc_file_space);
DEFINE_INODE_EVENT(xfs_free_file_space);
DEFINE_INODE_EVENT(xfs_zero_file_space);
DEFINE_INODE_EVENT(xfs_collapse_file_space);
DEFINE_INODE_EVENT(xfs_insert_file_space);
DEFINE_INODE_EVENT(xfs_readdir);
#ifdef CONFIG_XFS_POSIX_ACL
DEFINE_INODE_EVENT(xfs_get_acl);
#endif
DEFINE_INODE_EVENT(xfs_vm_bmap);
DEFINE_INODE_EVENT(xfs_file_ioctl);
DEFINE_INODE_EVENT(xfs_file_compat_ioctl);
DEFINE_INODE_EVENT(xfs_ioctl_setattr);
DEFINE_INODE_EVENT(xfs_dir_fsync);
DEFINE_INODE_EVENT(xfs_file_fsync);
DEFINE_INODE_EVENT(xfs_destroy_inode);
DEFINE_INODE_EVENT(xfs_update_time);
DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
DEFINE_INODE_EVENT(xfs_inode_set_eofblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_clear_eofblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_free_eofblocks_invalid);
DEFINE_INODE_EVENT(xfs_inode_set_cowblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_clear_cowblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_free_cowblocks_invalid);
DEFINE_INODE_EVENT(xfs_inode_set_reclaimable);
DEFINE_INODE_EVENT(xfs_inode_reclaiming);
DEFINE_INODE_EVENT(xfs_inode_set_need_inactive);
DEFINE_INODE_EVENT(xfs_inode_inactivating);
/*
 * ftrace's __print_symbolic requires that all enum values be wrapped in the
 * TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace
 * ring buffer.  Somehow this was only worth mentioning in the ftrace sample
 * code.
 */
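/*
 * Editor's illustrative sketch of the pattern described above (based only on
 * the standard ftrace macros used below): each enum value is registered with
 * TRACE_DEFINE_ENUM() so that the __print_symbolic() table exported through
 * the event's format file carries numeric values rather than bare enum
 * symbols that userspace tools cannot resolve, e.g.:
 *
 *	TRACE_DEFINE_ENUM(PE_SIZE_PTE);
 *
 *	__print_symbolic(__entry->pe_size,
 *		{ PE_SIZE_PTE, "PTE" },
 *		{ PE_SIZE_PMD, "PMD" },
 *		{ PE_SIZE_PUD, "PUD" })
 */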
TRACE_DEFINE_ENUM(PE_SIZE_PTE);
TRACE_DEFINE_ENUM(PE_SIZE_PMD);
TRACE_DEFINE_ENUM(PE_SIZE_PUD);
TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_SHARED);
TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_COW);
TRACE_EVENT(xfs_filemap_fault,
	TP_PROTO(struct xfs_inode *ip, enum page_entry_size pe_size,
		 bool write_fault),
	TP_ARGS(ip, pe_size, write_fault),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(enum page_entry_size, pe_size)
		__field(bool, write_fault)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->pe_size = pe_size;
		__entry->write_fault = write_fault;
	),
	TP_printk("dev %d:%d ino 0x%llx %s write_fault %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_symbolic(__entry->pe_size,
			{ PE_SIZE_PTE, "PTE" },
			{ PE_SIZE_PMD, "PMD" },
			{ PE_SIZE_PUD, "PUD" }),
		  __entry->write_fault)
)
DECLARE_EVENT_CLASS(xfs_iref_class,
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
	TP_ARGS(ip, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, count)
		__field(int, pincount)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->count = atomic_read(&VFS_I(ip)->i_count);
		__entry->pincount = atomic_read(&ip->i_pincount);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->count,
		  __entry->pincount,
		  (char *)__entry->caller_ip)
)
TRACE_EVENT(xfs_iomap_prealloc_size,
	TP_PROTO(struct xfs_inode *ip, xfs_fsblock_t blocks, int shift,
		 unsigned int writeio_blocks),
	TP_ARGS(ip, blocks, shift, writeio_blocks),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsblock_t, blocks)
		__field(int, shift)
		__field(unsigned int, writeio_blocks)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->blocks = blocks;
		__entry->shift = shift;
		__entry->writeio_blocks = writeio_blocks;
	),
	TP_printk("dev %d:%d ino 0x%llx prealloc blocks %llu shift %d "
		  "m_allocsize_blocks %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino,
		  __entry->blocks, __entry->shift, __entry->writeio_blocks)
)
TRACE_EVENT(xfs_irec_merge_pre,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino,
		 uint16_t holemask, xfs_agino_t nagino, uint16_t nholemask),
	TP_ARGS(mp, agno, agino, holemask, nagino, nholemask),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, agino)
		__field(uint16_t, holemask)
		__field(xfs_agino_t, nagino)
		__field(uint16_t, nholemask)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agino = agino;
		__entry->holemask = holemask;
		__entry->nagino = nagino;
		__entry->nholemask = nholemask;
	),
	TP_printk("dev %d:%d agno 0x%x agino 0x%x holemask 0x%x new_agino 0x%x new_holemask 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agino,
		  __entry->holemask,
		  __entry->nagino,
		  __entry->nholemask)
)
TRACE_EVENT(xfs_irec_merge_post,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino,
		 uint16_t holemask),
	TP_ARGS(mp, agno, agino, holemask),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, agino)
		__field(uint16_t, holemask)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agino = agino;
		__entry->holemask = holemask;
	),
	TP_printk("dev %d:%d agno 0x%x agino 0x%x holemask 0x%x",
		  MAJOR(__entry->dev),
		  MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agino,
		  __entry->holemask)
)
#define DEFINE_IREF_EVENT(name) \
DEFINE_EVENT(xfs_iref_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
	TP_ARGS(ip, caller_ip))
DEFINE_IREF_EVENT(xfs_irele);
DEFINE_IREF_EVENT(xfs_inode_pin);
DEFINE_IREF_EVENT(xfs_inode_unpin);
DEFINE_IREF_EVENT(xfs_inode_unpin_nowait);
DECLARE_EVENT_CLASS(xfs_namespace_class,
	TP_PROTO(struct xfs_inode *dp, const struct xfs_name *name),
	TP_ARGS(dp, name),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, dp_ino)
		__field(int, namelen)
		__dynamic_array(char, name, name->len)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(dp)->i_sb->s_dev;
		__entry->dp_ino = dp->i_ino;
		__entry->namelen = name->len;
		memcpy(__get_str(name), name->name, name->len);
	),
	TP_printk("dev %d:%d dp ino 0x%llx name %.*s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dp_ino,
		  __entry->namelen,
		  __get_str(name))
)
#define DEFINE_NAMESPACE_EVENT(name) \
DEFINE_EVENT(xfs_namespace_class, name, \
	TP_PROTO(struct xfs_inode *dp, const struct xfs_name *name), \
	TP_ARGS(dp, name))
DEFINE_NAMESPACE_EVENT(xfs_remove);
DEFINE_NAMESPACE_EVENT(xfs_link);
DEFINE_NAMESPACE_EVENT(xfs_lookup);
DEFINE_NAMESPACE_EVENT(xfs_create);
DEFINE_NAMESPACE_EVENT(xfs_symlink);
TRACE_EVENT(xfs_rename,
	TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp,
		 struct xfs_name *src_name, struct xfs_name *target_name),
	TP_ARGS(src_dp, target_dp, src_name, target_name),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, src_dp_ino)
		__field(xfs_ino_t, target_dp_ino)
		__field(int, src_namelen)
		__field(int, target_namelen)
		__dynamic_array(char, src_name, src_name->len)
		__dynamic_array(char, target_name, target_name->len)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(src_dp)->i_sb->s_dev;
		__entry->src_dp_ino = src_dp->i_ino;
		__entry->target_dp_ino = target_dp->i_ino;
		__entry->src_namelen = src_name->len;
		__entry->target_namelen = target_name->len;
		memcpy(__get_str(src_name), src_name->name, src_name->len);
		memcpy(__get_str(target_name), target_name->name,
			target_name->len);
	),
	TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx"
		  " src name %.*s target name %.*s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->src_dp_ino,
		  __entry->target_dp_ino,
		  __entry->src_namelen,
		  __get_str(src_name),
		  __entry->target_namelen,
		  __get_str(target_name))
)
DECLARE_EVENT_CLASS(xfs_dquot_class,
	TP_PROTO(struct xfs_dquot *dqp),
	TP_ARGS(dqp),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u32, id)
		__field(xfs_dqtype_t, type)
		__field(unsigned, flags)
		__field(unsigned, nrefs)
		__field(unsigned long long, res_bcount)
		__field(unsigned long long, res_rtbcount)
		__field(unsigned long long, res_icount)
		__field(unsigned long long, bcount)
		__field(unsigned long long, rtbcount)
		__field(unsigned long long, icount)
		__field(unsigned long long, blk_hardlimit)
		__field(unsigned long long, blk_softlimit)
		__field(unsigned long long, rtb_hardlimit)
		__field(unsigned long long, rtb_softlimit)
		__field(unsigned long long, ino_hardlimit)
		__field(unsigned long long, ino_softlimit)
	),
	TP_fast_assign(
		__entry->dev = dqp->q_mount->m_super->s_dev;
		__entry->id = dqp->q_id;
		__entry->type = dqp->q_type;
		__entry->flags = dqp->q_flags;
		__entry->nrefs = dqp->q_nrefs;
		__entry->res_bcount = dqp->q_blk.reserved;
		__entry->res_rtbcount = dqp->q_rtb.reserved;
		__entry->res_icount = dqp->q_ino.reserved;
		__entry->bcount = dqp->q_blk.count;
		__entry->rtbcount = dqp->q_rtb.count;
		__entry->icount = dqp->q_ino.count;
		__entry->blk_hardlimit = dqp->q_blk.hardlimit;
		__entry->blk_softlimit = dqp->q_blk.softlimit;
		__entry->rtb_hardlimit = dqp->q_rtb.hardlimit;
		__entry->rtb_softlimit = dqp->q_rtb.softlimit;
		__entry->ino_hardlimit = dqp->q_ino.hardlimit;
		__entry->ino_softlimit = dqp->q_ino.softlimit;
	),
	TP_printk("dev %d:%d id 0x%x type %s flags %s nrefs %u "
		  "res_bc 0x%llx res_rtbc 0x%llx res_ic 0x%llx "
		  "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx "
		  "rtbcnt 0x%llx rtbhardlimit 0x%llx rtbsoftlimit 0x%llx "
		  "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->id,
		  __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS),
		  __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS),
		  __entry->nrefs,
		  __entry->res_bcount,
		  __entry->res_rtbcount,
		  __entry->res_icount,
		  __entry->bcount,
		  __entry->blk_hardlimit,
		  __entry->blk_softlimit,
		  __entry->rtbcount,
		  __entry->rtb_hardlimit,
		  __entry->rtb_softlimit,
		  __entry->icount,
		  __entry->ino_hardlimit,
		  __entry->ino_softlimit)
)
  1024. #define DEFINE_DQUOT_EVENT(name) \
  1025. DEFINE_EVENT(xfs_dquot_class, name, \
  1026. TP_PROTO(struct xfs_dquot *dqp), \
  1027. TP_ARGS(dqp))
  1028. DEFINE_DQUOT_EVENT(xfs_dqadjust);
  1029. DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
  1030. DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
  1031. DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy);
  1032. DEFINE_DQUOT_EVENT(xfs_dqreclaim_done);
  1033. DEFINE_DQUOT_EVENT(xfs_dqattach_found);
  1034. DEFINE_DQUOT_EVENT(xfs_dqattach_get);
  1035. DEFINE_DQUOT_EVENT(xfs_dqalloc);
  1036. DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
  1037. DEFINE_DQUOT_EVENT(xfs_dqread);
  1038. DEFINE_DQUOT_EVENT(xfs_dqread_fail);
  1039. DEFINE_DQUOT_EVENT(xfs_dqget_hit);
  1040. DEFINE_DQUOT_EVENT(xfs_dqget_miss);
  1041. DEFINE_DQUOT_EVENT(xfs_dqget_freeing);
  1042. DEFINE_DQUOT_EVENT(xfs_dqget_dup);
  1043. DEFINE_DQUOT_EVENT(xfs_dqput);
  1044. DEFINE_DQUOT_EVENT(xfs_dqput_free);
  1045. DEFINE_DQUOT_EVENT(xfs_dqrele);
  1046. DEFINE_DQUOT_EVENT(xfs_dqflush);
  1047. DEFINE_DQUOT_EVENT(xfs_dqflush_force);
  1048. DEFINE_DQUOT_EVENT(xfs_dqflush_done);
  1049. DEFINE_DQUOT_EVENT(xfs_trans_apply_dquot_deltas_before);
  1050. DEFINE_DQUOT_EVENT(xfs_trans_apply_dquot_deltas_after);
  1051. TRACE_EVENT(xfs_trans_mod_dquot,
  1052. TP_PROTO(struct xfs_trans *tp, struct xfs_dquot *dqp,
  1053. unsigned int field, int64_t delta),
  1054. TP_ARGS(tp, dqp, field, delta),
  1055. TP_STRUCT__entry(
  1056. __field(dev_t, dev)
  1057. __field(xfs_dqtype_t, type)
  1058. __field(unsigned int, flags)
  1059. __field(unsigned int, dqid)
  1060. __field(unsigned int, field)
  1061. __field(int64_t, delta)
  1062. ),
  1063. TP_fast_assign(
  1064. __entry->dev = tp->t_mountp->m_super->s_dev;
  1065. __entry->type = dqp->q_type;
  1066. __entry->flags = dqp->q_flags;
  1067. __entry->dqid = dqp->q_id;
  1068. __entry->field = field;
  1069. __entry->delta = delta;
  1070. ),
  1071. TP_printk("dev %d:%d dquot id 0x%x type %s flags %s field %s delta %lld",
  1072. MAJOR(__entry->dev), MINOR(__entry->dev),
  1073. __entry->dqid,
  1074. __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS),
  1075. __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS),
  1076. __print_flags(__entry->field, "|", XFS_QMOPT_FLAGS),
  1077. __entry->delta)
  1078. );
  1079. DECLARE_EVENT_CLASS(xfs_dqtrx_class,
  1080. TP_PROTO(struct xfs_dqtrx *qtrx),
  1081. TP_ARGS(qtrx),
  1082. TP_STRUCT__entry(
  1083. __field(dev_t, dev)
  1084. __field(xfs_dqtype_t, type)
  1085. __field(unsigned int, flags)
  1086. __field(u32, dqid)
  1087. __field(uint64_t, blk_res)
  1088. __field(int64_t, bcount_delta)
  1089. __field(int64_t, delbcnt_delta)
  1090. __field(uint64_t, rtblk_res)
  1091. __field(uint64_t, rtblk_res_used)
  1092. __field(int64_t, rtbcount_delta)
  1093. __field(int64_t, delrtb_delta)
  1094. __field(uint64_t, ino_res)
  1095. __field(uint64_t, ino_res_used)
  1096. __field(int64_t, icount_delta)
  1097. ),
  1098. TP_fast_assign(
  1099. __entry->dev = qtrx->qt_dquot->q_mount->m_super->s_dev;
  1100. __entry->type = qtrx->qt_dquot->q_type;
  1101. __entry->flags = qtrx->qt_dquot->q_flags;
  1102. __entry->dqid = qtrx->qt_dquot->q_id;
  1103. __entry->blk_res = qtrx->qt_blk_res;
  1104. __entry->bcount_delta = qtrx->qt_bcount_delta;
  1105. __entry->delbcnt_delta = qtrx->qt_delbcnt_delta;
  1106. __entry->rtblk_res = qtrx->qt_rtblk_res;
  1107. __entry->rtblk_res_used = qtrx->qt_rtblk_res_used;
  1108. __entry->rtbcount_delta = qtrx->qt_rtbcount_delta;
  1109. __entry->delrtb_delta = qtrx->qt_delrtb_delta;
  1110. __entry->ino_res = qtrx->qt_ino_res;
  1111. __entry->ino_res_used = qtrx->qt_ino_res_used;
  1112. __entry->icount_delta = qtrx->qt_icount_delta;
  1113. ),
  1114. TP_printk("dev %d:%d dquot id 0x%x type %s flags %s "
  1115. "blk_res %llu bcount_delta %lld delbcnt_delta %lld "
  1116. "rtblk_res %llu rtblk_res_used %llu rtbcount_delta %lld delrtb_delta %lld "
  1117. "ino_res %llu ino_res_used %llu icount_delta %lld",
  1118. MAJOR(__entry->dev), MINOR(__entry->dev),
  1119. __entry->dqid,
  1120. __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS),
  1121. __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS),
  1122. __entry->blk_res,
  1123. __entry->bcount_delta,
  1124. __entry->delbcnt_delta,
  1125. __entry->rtblk_res,
  1126. __entry->rtblk_res_used,
  1127. __entry->rtbcount_delta,
  1128. __entry->delrtb_delta,
  1129. __entry->ino_res,
  1130. __entry->ino_res_used,
  1131. __entry->icount_delta)
  1132. )
  1133. #define DEFINE_DQTRX_EVENT(name) \
  1134. DEFINE_EVENT(xfs_dqtrx_class, name, \
  1135. TP_PROTO(struct xfs_dqtrx *qtrx), \
  1136. TP_ARGS(qtrx))
  1137. DEFINE_DQTRX_EVENT(xfs_trans_apply_dquot_deltas);
  1138. DEFINE_DQTRX_EVENT(xfs_trans_mod_dquot_before);
  1139. DEFINE_DQTRX_EVENT(xfs_trans_mod_dquot_after);
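/*
 * The three xfs_dqtrx_class events above share a single format string. Once
 * built in, they can usually be toggled from userspace via tracefs (the mount
 * point may differ by setup):
 *
 *	echo 1 > /sys/kernel/tracing/events/xfs/xfs_trans_apply_dquot_deltas/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */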
  1140. DECLARE_EVENT_CLASS(xfs_loggrant_class,
  1141. TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
  1142. TP_ARGS(log, tic),
  1143. TP_STRUCT__entry(
  1144. __field(dev_t, dev)
  1145. __field(char, ocnt)
  1146. __field(char, cnt)
  1147. __field(int, curr_res)
  1148. __field(int, unit_res)
  1149. __field(unsigned int, flags)
  1150. __field(int, reserveq)
  1151. __field(int, writeq)
  1152. __field(int, grant_reserve_cycle)
  1153. __field(int, grant_reserve_bytes)
  1154. __field(int, grant_write_cycle)
  1155. __field(int, grant_write_bytes)
  1156. __field(int, curr_cycle)
  1157. __field(int, curr_block)
  1158. __field(xfs_lsn_t, tail_lsn)
  1159. ),
  1160. TP_fast_assign(
  1161. __entry->dev = log->l_mp->m_super->s_dev;
  1162. __entry->ocnt = tic->t_ocnt;
  1163. __entry->cnt = tic->t_cnt;
  1164. __entry->curr_res = tic->t_curr_res;
  1165. __entry->unit_res = tic->t_unit_res;
  1166. __entry->flags = tic->t_flags;
  1167. __entry->reserveq = list_empty(&log->l_reserve_head.waiters);
  1168. __entry->writeq = list_empty(&log->l_write_head.waiters);
  1169. xlog_crack_grant_head(&log->l_reserve_head.grant,
  1170. &__entry->grant_reserve_cycle,
  1171. &__entry->grant_reserve_bytes);
  1172. xlog_crack_grant_head(&log->l_write_head.grant,
  1173. &__entry->grant_write_cycle,
  1174. &__entry->grant_write_bytes);
  1175. __entry->curr_cycle = log->l_curr_cycle;
  1176. __entry->curr_block = log->l_curr_block;
  1177. __entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
  1178. ),
  1179. TP_printk("dev %d:%d t_ocnt %u t_cnt %u t_curr_res %u "
  1180. "t_unit_res %u t_flags %s reserveq %s "
  1181. "writeq %s grant_reserve_cycle %d "
  1182. "grant_reserve_bytes %d grant_write_cycle %d "
  1183. "grant_write_bytes %d curr_cycle %d curr_block %d "
  1184. "tail_cycle %d tail_block %d",
  1185. MAJOR(__entry->dev), MINOR(__entry->dev),
  1186. __entry->ocnt,
  1187. __entry->cnt,
  1188. __entry->curr_res,
  1189. __entry->unit_res,
  1190. __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
  1191. __entry->reserveq ? "empty" : "active",
  1192. __entry->writeq ? "empty" : "active",
  1193. __entry->grant_reserve_cycle,
  1194. __entry->grant_reserve_bytes,
  1195. __entry->grant_write_cycle,
  1196. __entry->grant_write_bytes,
  1197. __entry->curr_cycle,
  1198. __entry->curr_block,
  1199. CYCLE_LSN(__entry->tail_lsn),
  1200. BLOCK_LSN(__entry->tail_lsn)
  1201. )
  1202. )
  1203. #define DEFINE_LOGGRANT_EVENT(name) \
  1204. DEFINE_EVENT(xfs_loggrant_class, name, \
  1205. TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
  1206. TP_ARGS(log, tic))
  1207. DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
  1208. DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
  1209. DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
  1210. DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
  1211. DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
  1212. DEFINE_LOGGRANT_EVENT(xfs_log_reserve_exit);
  1213. DEFINE_LOGGRANT_EVENT(xfs_log_regrant);
  1214. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_exit);
  1215. DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant);
  1216. DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant_exit);
  1217. DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant_sub);
  1218. DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant);
  1219. DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_sub);
  1220. DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_exit);
  1221. DEFINE_LOGGRANT_EVENT(xfs_log_cil_wait);
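/*
 * Example of watching the grant-head accounting above with an external
 * tracer (assuming trace-cmd is installed; any ftrace front end works):
 *
 *	trace-cmd record -e xfs:xfs_log_reserve -e xfs:xfs_log_reserve_exit
 *	trace-cmd report
 */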
  1222. DECLARE_EVENT_CLASS(xfs_log_item_class,
  1223. TP_PROTO(struct xfs_log_item *lip),
  1224. TP_ARGS(lip),
  1225. TP_STRUCT__entry(
  1226. __field(dev_t, dev)
  1227. __field(void *, lip)
  1228. __field(uint, type)
  1229. __field(unsigned long, flags)
  1230. __field(xfs_lsn_t, lsn)
  1231. ),
  1232. TP_fast_assign(
  1233. __entry->dev = lip->li_log->l_mp->m_super->s_dev;
  1234. __entry->lip = lip;
  1235. __entry->type = lip->li_type;
  1236. __entry->flags = lip->li_flags;
  1237. __entry->lsn = lip->li_lsn;
  1238. ),
  1239. TP_printk("dev %d:%d lip %p lsn %d/%d type %s flags %s",
  1240. MAJOR(__entry->dev), MINOR(__entry->dev),
  1241. __entry->lip,
  1242. CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn),
  1243. __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
  1244. __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
  1245. )
  1246. TRACE_EVENT(xfs_log_force,
  1247. TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn, unsigned long caller_ip),
  1248. TP_ARGS(mp, lsn, caller_ip),
  1249. TP_STRUCT__entry(
  1250. __field(dev_t, dev)
  1251. __field(xfs_lsn_t, lsn)
  1252. __field(unsigned long, caller_ip)
  1253. ),
  1254. TP_fast_assign(
  1255. __entry->dev = mp->m_super->s_dev;
  1256. __entry->lsn = lsn;
  1257. __entry->caller_ip = caller_ip;
  1258. ),
  1259. TP_printk("dev %d:%d lsn 0x%llx caller %pS",
  1260. MAJOR(__entry->dev), MINOR(__entry->dev),
  1261. __entry->lsn, (void *)__entry->caller_ip)
  1262. )
  1263. #define DEFINE_LOG_ITEM_EVENT(name) \
  1264. DEFINE_EVENT(xfs_log_item_class, name, \
  1265. TP_PROTO(struct xfs_log_item *lip), \
  1266. TP_ARGS(lip))
  1267. DEFINE_LOG_ITEM_EVENT(xfs_ail_push);
  1268. DEFINE_LOG_ITEM_EVENT(xfs_ail_pinned);
  1269. DEFINE_LOG_ITEM_EVENT(xfs_ail_locked);
  1270. DEFINE_LOG_ITEM_EVENT(xfs_ail_flushing);
  1271. DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_mark);
  1272. DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_skip);
  1273. DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_unpin);
  1274. DECLARE_EVENT_CLASS(xfs_ail_class,
  1275. TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn),
  1276. TP_ARGS(lip, old_lsn, new_lsn),
  1277. TP_STRUCT__entry(
  1278. __field(dev_t, dev)
  1279. __field(void *, lip)
  1280. __field(uint, type)
  1281. __field(unsigned long, flags)
  1282. __field(xfs_lsn_t, old_lsn)
  1283. __field(xfs_lsn_t, new_lsn)
  1284. ),
  1285. TP_fast_assign(
  1286. __entry->dev = lip->li_log->l_mp->m_super->s_dev;
  1287. __entry->lip = lip;
  1288. __entry->type = lip->li_type;
  1289. __entry->flags = lip->li_flags;
  1290. __entry->old_lsn = old_lsn;
  1291. __entry->new_lsn = new_lsn;
  1292. ),
  1293. TP_printk("dev %d:%d lip %p old lsn %d/%d new lsn %d/%d type %s flags %s",
  1294. MAJOR(__entry->dev), MINOR(__entry->dev),
  1295. __entry->lip,
  1296. CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
  1297. CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn),
  1298. __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
  1299. __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
  1300. )
  1301. #define DEFINE_AIL_EVENT(name) \
  1302. DEFINE_EVENT(xfs_ail_class, name, \
  1303. TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn), \
  1304. TP_ARGS(lip, old_lsn, new_lsn))
  1305. DEFINE_AIL_EVENT(xfs_ail_insert);
  1306. DEFINE_AIL_EVENT(xfs_ail_move);
  1307. DEFINE_AIL_EVENT(xfs_ail_delete);
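/*
 * Illustrative call pattern for the AIL events above: the caller reports the
 * log item's LSN transition and the format string decodes both LSNs into
 * cycle/block pairs:
 *
 *	trace_xfs_ail_move(lip, old_lsn, new_lsn);
 */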
  1308. TRACE_EVENT(xfs_log_assign_tail_lsn,
  1309. TP_PROTO(struct xlog *log, xfs_lsn_t new_lsn),
  1310. TP_ARGS(log, new_lsn),
  1311. TP_STRUCT__entry(
  1312. __field(dev_t, dev)
  1313. __field(xfs_lsn_t, new_lsn)
  1314. __field(xfs_lsn_t, old_lsn)
  1315. __field(xfs_lsn_t, last_sync_lsn)
  1316. ),
  1317. TP_fast_assign(
  1318. __entry->dev = log->l_mp->m_super->s_dev;
  1319. __entry->new_lsn = new_lsn;
  1320. __entry->old_lsn = atomic64_read(&log->l_tail_lsn);
  1321. __entry->last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
  1322. ),
  1323. TP_printk("dev %d:%d new tail lsn %d/%d, old lsn %d/%d, last sync %d/%d",
  1324. MAJOR(__entry->dev), MINOR(__entry->dev),
  1325. CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn),
  1326. CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
  1327. CYCLE_LSN(__entry->last_sync_lsn), BLOCK_LSN(__entry->last_sync_lsn))
  1328. )
  1329. DECLARE_EVENT_CLASS(xfs_file_class,
  1330. TP_PROTO(struct kiocb *iocb, struct iov_iter *iter),
  1331. TP_ARGS(iocb, iter),
  1332. TP_STRUCT__entry(
  1333. __field(dev_t, dev)
  1334. __field(xfs_ino_t, ino)
  1335. __field(xfs_fsize_t, size)
  1336. __field(loff_t, offset)
  1337. __field(size_t, count)
  1338. ),
  1339. TP_fast_assign(
  1340. __entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
  1341. __entry->ino = XFS_I(file_inode(iocb->ki_filp))->i_ino;
  1342. __entry->size = XFS_I(file_inode(iocb->ki_filp))->i_disk_size;
  1343. __entry->offset = iocb->ki_pos;
  1344. __entry->count = iov_iter_count(iter);
  1345. ),
  1346. TP_printk("dev %d:%d ino 0x%llx disize 0x%llx pos 0x%llx bytecount 0x%zx",
  1347. MAJOR(__entry->dev), MINOR(__entry->dev),
  1348. __entry->ino,
  1349. __entry->size,
  1350. __entry->offset,
  1351. __entry->count)
  1352. )
  1353. #define DEFINE_RW_EVENT(name) \
  1354. DEFINE_EVENT(xfs_file_class, name, \
  1355. TP_PROTO(struct kiocb *iocb, struct iov_iter *iter), \
  1356. TP_ARGS(iocb, iter))
  1357. DEFINE_RW_EVENT(xfs_file_buffered_read);
  1358. DEFINE_RW_EVENT(xfs_file_direct_read);
  1359. DEFINE_RW_EVENT(xfs_file_dax_read);
  1360. DEFINE_RW_EVENT(xfs_file_buffered_write);
  1361. DEFINE_RW_EVENT(xfs_file_direct_write);
  1362. DEFINE_RW_EVENT(xfs_file_dax_write);
  1363. DEFINE_RW_EVENT(xfs_reflink_bounce_dio_write);
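/*
 * Sketch of a read/write event invocation (illustrative): the generated hooks
 * take the kiocb and the iov_iter being serviced, here named iocb and to:
 *
 *	trace_xfs_file_buffered_read(iocb, to);
 */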
  1364. DECLARE_EVENT_CLASS(xfs_imap_class,
  1365. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
  1366. int whichfork, struct xfs_bmbt_irec *irec),
  1367. TP_ARGS(ip, offset, count, whichfork, irec),
  1368. TP_STRUCT__entry(
  1369. __field(dev_t, dev)
  1370. __field(xfs_ino_t, ino)
  1371. __field(loff_t, size)
  1372. __field(loff_t, offset)
  1373. __field(size_t, count)
  1374. __field(int, whichfork)
  1375. __field(xfs_fileoff_t, startoff)
  1376. __field(xfs_fsblock_t, startblock)
  1377. __field(xfs_filblks_t, blockcount)
  1378. ),
  1379. TP_fast_assign(
  1380. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1381. __entry->ino = ip->i_ino;
  1382. __entry->size = ip->i_disk_size;
  1383. __entry->offset = offset;
  1384. __entry->count = count;
  1385. __entry->whichfork = whichfork;
  1386. __entry->startoff = irec ? irec->br_startoff : 0;
  1387. __entry->startblock = irec ? irec->br_startblock : 0;
  1388. __entry->blockcount = irec ? irec->br_blockcount : 0;
  1389. ),
  1390. TP_printk("dev %d:%d ino 0x%llx disize 0x%llx pos 0x%llx bytecount 0x%zx "
  1391. "fork %s startoff 0x%llx startblock 0x%llx fsbcount 0x%llx",
  1392. MAJOR(__entry->dev), MINOR(__entry->dev),
  1393. __entry->ino,
  1394. __entry->size,
  1395. __entry->offset,
  1396. __entry->count,
  1397. __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
  1398. __entry->startoff,
  1399. (int64_t)__entry->startblock,
  1400. __entry->blockcount)
  1401. )
  1402. #define DEFINE_IMAP_EVENT(name) \
  1403. DEFINE_EVENT(xfs_imap_class, name, \
  1404. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
  1405. int whichfork, struct xfs_bmbt_irec *irec), \
  1406. TP_ARGS(ip, offset, count, whichfork, irec))
  1407. DEFINE_IMAP_EVENT(xfs_map_blocks_found);
  1408. DEFINE_IMAP_EVENT(xfs_map_blocks_alloc);
  1409. DEFINE_IMAP_EVENT(xfs_iomap_alloc);
  1410. DEFINE_IMAP_EVENT(xfs_iomap_found);
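/*
 * Note that xfs_imap_class tolerates a NULL irec and reports the extent
 * fields as zero in that case, so an illustrative call for an empty mapping
 * of the data fork would be:
 *
 *	trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, NULL);
 */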
  1411. DECLARE_EVENT_CLASS(xfs_simple_io_class,
  1412. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
  1413. TP_ARGS(ip, offset, count),
  1414. TP_STRUCT__entry(
  1415. __field(dev_t, dev)
  1416. __field(xfs_ino_t, ino)
  1417. __field(loff_t, isize)
  1418. __field(loff_t, disize)
  1419. __field(loff_t, offset)
  1420. __field(size_t, count)
  1421. ),
  1422. TP_fast_assign(
  1423. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1424. __entry->ino = ip->i_ino;
  1425. __entry->isize = VFS_I(ip)->i_size;
  1426. __entry->disize = ip->i_disk_size;
  1427. __entry->offset = offset;
  1428. __entry->count = count;
  1429. ),
  1430. TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx "
  1431. "pos 0x%llx bytecount 0x%zx",
  1432. MAJOR(__entry->dev), MINOR(__entry->dev),
  1433. __entry->ino,
  1434. __entry->isize,
  1435. __entry->disize,
  1436. __entry->offset,
  1437. __entry->count)
  1438. );
  1439. #define DEFINE_SIMPLE_IO_EVENT(name) \
  1440. DEFINE_EVENT(xfs_simple_io_class, name, \
  1441. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
  1442. TP_ARGS(ip, offset, count))
  1443. DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
  1444. DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
  1445. DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
  1446. DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof);
  1447. DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write);
  1448. DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_unwritten);
  1449. DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_append);
  1450. DECLARE_EVENT_CLASS(xfs_itrunc_class,
  1451. TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
  1452. TP_ARGS(ip, new_size),
  1453. TP_STRUCT__entry(
  1454. __field(dev_t, dev)
  1455. __field(xfs_ino_t, ino)
  1456. __field(xfs_fsize_t, size)
  1457. __field(xfs_fsize_t, new_size)
  1458. ),
  1459. TP_fast_assign(
  1460. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1461. __entry->ino = ip->i_ino;
  1462. __entry->size = ip->i_disk_size;
  1463. __entry->new_size = new_size;
  1464. ),
  1465. TP_printk("dev %d:%d ino 0x%llx disize 0x%llx new_size 0x%llx",
  1466. MAJOR(__entry->dev), MINOR(__entry->dev),
  1467. __entry->ino,
  1468. __entry->size,
  1469. __entry->new_size)
  1470. )
  1471. #define DEFINE_ITRUNC_EVENT(name) \
  1472. DEFINE_EVENT(xfs_itrunc_class, name, \
  1473. TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
  1474. TP_ARGS(ip, new_size))
  1475. DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_start);
  1476. DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_end);
  1477. TRACE_EVENT(xfs_pagecache_inval,
  1478. TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
  1479. TP_ARGS(ip, start, finish),
  1480. TP_STRUCT__entry(
  1481. __field(dev_t, dev)
  1482. __field(xfs_ino_t, ino)
  1483. __field(xfs_fsize_t, size)
  1484. __field(xfs_off_t, start)
  1485. __field(xfs_off_t, finish)
  1486. ),
  1487. TP_fast_assign(
  1488. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1489. __entry->ino = ip->i_ino;
  1490. __entry->size = ip->i_disk_size;
  1491. __entry->start = start;
  1492. __entry->finish = finish;
  1493. ),
  1494. TP_printk("dev %d:%d ino 0x%llx disize 0x%llx start 0x%llx finish 0x%llx",
  1495. MAJOR(__entry->dev), MINOR(__entry->dev),
  1496. __entry->ino,
  1497. __entry->size,
  1498. __entry->start,
  1499. __entry->finish)
  1500. );
  1501. TRACE_EVENT(xfs_bunmap,
  1502. TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t fileoff, xfs_filblks_t len,
  1503. int flags, unsigned long caller_ip),
  1504. TP_ARGS(ip, fileoff, len, flags, caller_ip),
  1505. TP_STRUCT__entry(
  1506. __field(dev_t, dev)
  1507. __field(xfs_ino_t, ino)
  1508. __field(xfs_fsize_t, size)
  1509. __field(xfs_fileoff_t, fileoff)
  1510. __field(xfs_filblks_t, len)
  1511. __field(unsigned long, caller_ip)
  1512. __field(int, flags)
  1513. ),
  1514. TP_fast_assign(
  1515. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1516. __entry->ino = ip->i_ino;
  1517. __entry->size = ip->i_disk_size;
  1518. __entry->fileoff = fileoff;
  1519. __entry->len = len;
  1520. __entry->caller_ip = caller_ip;
  1521. __entry->flags = flags;
  1522. ),
  1523. TP_printk("dev %d:%d ino 0x%llx disize 0x%llx fileoff 0x%llx fsbcount 0x%llx "
  1524. "flags %s caller %pS",
  1525. MAJOR(__entry->dev), MINOR(__entry->dev),
  1526. __entry->ino,
  1527. __entry->size,
  1528. __entry->fileoff,
  1529. __entry->len,
  1530. __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
  1531. (void *)__entry->caller_ip)
  1532. );
  1533. DECLARE_EVENT_CLASS(xfs_extent_busy_class,
  1534. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  1535. xfs_agblock_t agbno, xfs_extlen_t len),
  1536. TP_ARGS(mp, agno, agbno, len),
  1537. TP_STRUCT__entry(
  1538. __field(dev_t, dev)
  1539. __field(xfs_agnumber_t, agno)
  1540. __field(xfs_agblock_t, agbno)
  1541. __field(xfs_extlen_t, len)
  1542. ),
  1543. TP_fast_assign(
  1544. __entry->dev = mp->m_super->s_dev;
  1545. __entry->agno = agno;
  1546. __entry->agbno = agbno;
  1547. __entry->len = len;
  1548. ),
  1549. TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
  1550. MAJOR(__entry->dev), MINOR(__entry->dev),
  1551. __entry->agno,
  1552. __entry->agbno,
  1553. __entry->len)
  1554. );
  1555. #define DEFINE_BUSY_EVENT(name) \
  1556. DEFINE_EVENT(xfs_extent_busy_class, name, \
  1557. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  1558. xfs_agblock_t agbno, xfs_extlen_t len), \
  1559. TP_ARGS(mp, agno, agbno, len))
  1560. DEFINE_BUSY_EVENT(xfs_extent_busy);
  1561. DEFINE_BUSY_EVENT(xfs_extent_busy_enomem);
  1562. DEFINE_BUSY_EVENT(xfs_extent_busy_force);
  1563. DEFINE_BUSY_EVENT(xfs_extent_busy_reuse);
  1564. DEFINE_BUSY_EVENT(xfs_extent_busy_clear);
  1565. TRACE_EVENT(xfs_extent_busy_trim,
  1566. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  1567. xfs_agblock_t agbno, xfs_extlen_t len,
  1568. xfs_agblock_t tbno, xfs_extlen_t tlen),
  1569. TP_ARGS(mp, agno, agbno, len, tbno, tlen),
  1570. TP_STRUCT__entry(
  1571. __field(dev_t, dev)
  1572. __field(xfs_agnumber_t, agno)
  1573. __field(xfs_agblock_t, agbno)
  1574. __field(xfs_extlen_t, len)
  1575. __field(xfs_agblock_t, tbno)
  1576. __field(xfs_extlen_t, tlen)
  1577. ),
  1578. TP_fast_assign(
  1579. __entry->dev = mp->m_super->s_dev;
  1580. __entry->agno = agno;
  1581. __entry->agbno = agbno;
  1582. __entry->len = len;
  1583. __entry->tbno = tbno;
  1584. __entry->tlen = tlen;
  1585. ),
  1586. TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x found_agbno 0x%x found_fsbcount 0x%x",
  1587. MAJOR(__entry->dev), MINOR(__entry->dev),
  1588. __entry->agno,
  1589. __entry->agbno,
  1590. __entry->len,
  1591. __entry->tbno,
  1592. __entry->tlen)
  1593. );
  1594. DECLARE_EVENT_CLASS(xfs_agf_class,
  1595. TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
  1596. unsigned long caller_ip),
  1597. TP_ARGS(mp, agf, flags, caller_ip),
  1598. TP_STRUCT__entry(
  1599. __field(dev_t, dev)
  1600. __field(xfs_agnumber_t, agno)
  1601. __field(int, flags)
  1602. __field(__u32, length)
  1603. __field(__u32, bno_root)
  1604. __field(__u32, cnt_root)
  1605. __field(__u32, bno_level)
  1606. __field(__u32, cnt_level)
  1607. __field(__u32, flfirst)
  1608. __field(__u32, fllast)
  1609. __field(__u32, flcount)
  1610. __field(__u32, freeblks)
  1611. __field(__u32, longest)
  1612. __field(unsigned long, caller_ip)
  1613. ),
  1614. TP_fast_assign(
  1615. __entry->dev = mp->m_super->s_dev;
  1616. __entry->agno = be32_to_cpu(agf->agf_seqno);
  1617. __entry->flags = flags;
  1618. __entry->length = be32_to_cpu(agf->agf_length);
  1619. __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
  1620. __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
  1621. __entry->bno_level =
  1622. be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
  1623. __entry->cnt_level =
  1624. be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
  1625. __entry->flfirst = be32_to_cpu(agf->agf_flfirst);
  1626. __entry->fllast = be32_to_cpu(agf->agf_fllast);
  1627. __entry->flcount = be32_to_cpu(agf->agf_flcount);
  1628. __entry->freeblks = be32_to_cpu(agf->agf_freeblks);
  1629. __entry->longest = be32_to_cpu(agf->agf_longest);
  1630. __entry->caller_ip = caller_ip;
  1631. ),
  1632. TP_printk("dev %d:%d agno 0x%x flags %s length %u roots b %u c %u "
  1633. "levels b %u c %u flfirst %u fllast %u flcount %u "
  1634. "freeblks %u longest %u caller %pS",
  1635. MAJOR(__entry->dev), MINOR(__entry->dev),
  1636. __entry->agno,
  1637. __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
  1638. __entry->length,
  1639. __entry->bno_root,
  1640. __entry->cnt_root,
  1641. __entry->bno_level,
  1642. __entry->cnt_level,
  1643. __entry->flfirst,
  1644. __entry->fllast,
  1645. __entry->flcount,
  1646. __entry->freeblks,
  1647. __entry->longest,
  1648. (void *)__entry->caller_ip)
  1649. );
  1650. #define DEFINE_AGF_EVENT(name) \
  1651. DEFINE_EVENT(xfs_agf_class, name, \
  1652. TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \
  1653. unsigned long caller_ip), \
  1654. TP_ARGS(mp, agf, flags, caller_ip))
  1655. DEFINE_AGF_EVENT(xfs_agf);
  1656. DEFINE_AGF_EVENT(xfs_agfl_reset);
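/*
 * The AGF events above dump the on-disk AGF header after byte swapping;
 * callers of such caller_ip tracepoints conventionally pass _RET_IP_ so the
 * caller column identifies who touched the AGF. A hedged sketch:
 *
 *	trace_xfs_agf(mp, agf, flags, _RET_IP_);
 */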
  1657. TRACE_EVENT(xfs_free_extent,
  1658. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
  1659. xfs_extlen_t len, enum xfs_ag_resv_type resv, int haveleft,
  1660. int haveright),
  1661. TP_ARGS(mp, agno, agbno, len, resv, haveleft, haveright),
  1662. TP_STRUCT__entry(
  1663. __field(dev_t, dev)
  1664. __field(xfs_agnumber_t, agno)
  1665. __field(xfs_agblock_t, agbno)
  1666. __field(xfs_extlen_t, len)
  1667. __field(int, resv)
  1668. __field(int, haveleft)
  1669. __field(int, haveright)
  1670. ),
  1671. TP_fast_assign(
  1672. __entry->dev = mp->m_super->s_dev;
  1673. __entry->agno = agno;
  1674. __entry->agbno = agbno;
  1675. __entry->len = len;
  1676. __entry->resv = resv;
  1677. __entry->haveleft = haveleft;
  1678. __entry->haveright = haveright;
  1679. ),
  1680. TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x resv %d %s",
  1681. MAJOR(__entry->dev), MINOR(__entry->dev),
  1682. __entry->agno,
  1683. __entry->agbno,
  1684. __entry->len,
  1685. __entry->resv,
  1686. __entry->haveleft ?
  1687. (__entry->haveright ? "both" : "left") :
  1688. (__entry->haveright ? "right" : "none"))
  1689. );
  1690. DECLARE_EVENT_CLASS(xfs_alloc_class,
  1691. TP_PROTO(struct xfs_alloc_arg *args),
  1692. TP_ARGS(args),
  1693. TP_STRUCT__entry(
  1694. __field(dev_t, dev)
  1695. __field(xfs_agnumber_t, agno)
  1696. __field(xfs_agblock_t, agbno)
  1697. __field(xfs_extlen_t, minlen)
  1698. __field(xfs_extlen_t, maxlen)
  1699. __field(xfs_extlen_t, mod)
  1700. __field(xfs_extlen_t, prod)
  1701. __field(xfs_extlen_t, minleft)
  1702. __field(xfs_extlen_t, total)
  1703. __field(xfs_extlen_t, alignment)
  1704. __field(xfs_extlen_t, minalignslop)
  1705. __field(xfs_extlen_t, len)
  1706. __field(short, type)
  1707. __field(short, otype)
  1708. __field(char, wasdel)
  1709. __field(char, wasfromfl)
  1710. __field(int, resv)
  1711. __field(int, datatype)
  1712. __field(xfs_fsblock_t, firstblock)
  1713. ),
  1714. TP_fast_assign(
  1715. __entry->dev = args->mp->m_super->s_dev;
  1716. __entry->agno = args->agno;
  1717. __entry->agbno = args->agbno;
  1718. __entry->minlen = args->minlen;
  1719. __entry->maxlen = args->maxlen;
  1720. __entry->mod = args->mod;
  1721. __entry->prod = args->prod;
  1722. __entry->minleft = args->minleft;
  1723. __entry->total = args->total;
  1724. __entry->alignment = args->alignment;
  1725. __entry->minalignslop = args->minalignslop;
  1726. __entry->len = args->len;
  1727. __entry->type = args->type;
  1728. __entry->otype = args->otype;
  1729. __entry->wasdel = args->wasdel;
  1730. __entry->wasfromfl = args->wasfromfl;
  1731. __entry->resv = args->resv;
  1732. __entry->datatype = args->datatype;
  1733. __entry->firstblock = args->tp->t_firstblock;
  1734. ),
  1735. TP_printk("dev %d:%d agno 0x%x agbno 0x%x minlen %u maxlen %u mod %u "
  1736. "prod %u minleft %u total %u alignment %u minalignslop %u "
  1737. "len %u type %s otype %s wasdel %d wasfromfl %d resv %d "
  1738. "datatype 0x%x firstblock 0x%llx",
  1739. MAJOR(__entry->dev), MINOR(__entry->dev),
  1740. __entry->agno,
  1741. __entry->agbno,
  1742. __entry->minlen,
  1743. __entry->maxlen,
  1744. __entry->mod,
  1745. __entry->prod,
  1746. __entry->minleft,
  1747. __entry->total,
  1748. __entry->alignment,
  1749. __entry->minalignslop,
  1750. __entry->len,
  1751. __print_symbolic(__entry->type, XFS_ALLOC_TYPES),
  1752. __print_symbolic(__entry->otype, XFS_ALLOC_TYPES),
  1753. __entry->wasdel,
  1754. __entry->wasfromfl,
  1755. __entry->resv,
  1756. __entry->datatype,
  1757. (unsigned long long)__entry->firstblock)
  1758. )
  1759. #define DEFINE_ALLOC_EVENT(name) \
  1760. DEFINE_EVENT(xfs_alloc_class, name, \
  1761. TP_PROTO(struct xfs_alloc_arg *args), \
  1762. TP_ARGS(args))
  1763. DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
  1764. DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
  1765. DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
  1766. DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
  1767. DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
  1768. DEFINE_ALLOC_EVENT(xfs_alloc_cur);
  1769. DEFINE_ALLOC_EVENT(xfs_alloc_cur_right);
  1770. DEFINE_ALLOC_EVENT(xfs_alloc_cur_left);
  1771. DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup);
  1772. DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup_done);
  1773. DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
  1774. DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry);
  1775. DEFINE_ALLOC_EVENT(xfs_alloc_near_busy);
  1776. DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
  1777. DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
  1778. DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
  1779. DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
  1780. DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
  1781. DEFINE_ALLOC_EVENT(xfs_alloc_size_busy);
  1782. DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
  1783. DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
  1784. DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
  1785. DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
  1786. DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
  1787. DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
  1788. DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
  1789. DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
  1790. DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
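/*
 * All allocator events above take the same argument, so one illustrative
 * invocation covers them: with args pointing at a filled-in
 * struct xfs_alloc_arg,
 *
 *	trace_xfs_alloc_exact_done(args);
 */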
  1791. TRACE_EVENT(xfs_alloc_cur_check,
  1792. TP_PROTO(struct xfs_mount *mp, xfs_btnum_t btnum, xfs_agblock_t bno,
  1793. xfs_extlen_t len, xfs_extlen_t diff, bool new),
  1794. TP_ARGS(mp, btnum, bno, len, diff, new),
  1795. TP_STRUCT__entry(
  1796. __field(dev_t, dev)
  1797. __field(xfs_btnum_t, btnum)
  1798. __field(xfs_agblock_t, bno)
  1799. __field(xfs_extlen_t, len)
  1800. __field(xfs_extlen_t, diff)
  1801. __field(bool, new)
  1802. ),
  1803. TP_fast_assign(
  1804. __entry->dev = mp->m_super->s_dev;
  1805. __entry->btnum = btnum;
  1806. __entry->bno = bno;
  1807. __entry->len = len;
  1808. __entry->diff = diff;
  1809. __entry->new = new;
  1810. ),
  1811. TP_printk("dev %d:%d btree %s agbno 0x%x fsbcount 0x%x diff 0x%x new %d",
  1812. MAJOR(__entry->dev), MINOR(__entry->dev),
  1813. __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
  1814. __entry->bno, __entry->len, __entry->diff, __entry->new)
  1815. )
  1816. DECLARE_EVENT_CLASS(xfs_da_class,
  1817. TP_PROTO(struct xfs_da_args *args),
  1818. TP_ARGS(args),
  1819. TP_STRUCT__entry(
  1820. __field(dev_t, dev)
  1821. __field(xfs_ino_t, ino)
  1822. __dynamic_array(char, name, args->namelen)
  1823. __field(int, namelen)
  1824. __field(xfs_dahash_t, hashval)
  1825. __field(xfs_ino_t, inumber)
  1826. __field(uint32_t, op_flags)
  1827. ),
  1828. TP_fast_assign(
  1829. __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
  1830. __entry->ino = args->dp->i_ino;
  1831. if (args->namelen)
  1832. memcpy(__get_str(name), args->name, args->namelen);
  1833. __entry->namelen = args->namelen;
  1834. __entry->hashval = args->hashval;
  1835. __entry->inumber = args->inumber;
  1836. __entry->op_flags = args->op_flags;
  1837. ),
  1838. TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x "
  1839. "inumber 0x%llx op_flags %s",
  1840. MAJOR(__entry->dev), MINOR(__entry->dev),
  1841. __entry->ino,
  1842. __entry->namelen,
  1843. __entry->namelen ? __get_str(name) : NULL,
  1844. __entry->namelen,
  1845. __entry->hashval,
  1846. __entry->inumber,
  1847. __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
  1848. )
  1849. #define DEFINE_DIR2_EVENT(name) \
  1850. DEFINE_EVENT(xfs_da_class, name, \
  1851. TP_PROTO(struct xfs_da_args *args), \
  1852. TP_ARGS(args))
  1853. DEFINE_DIR2_EVENT(xfs_dir2_sf_addname);
  1854. DEFINE_DIR2_EVENT(xfs_dir2_sf_create);
  1855. DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup);
  1856. DEFINE_DIR2_EVENT(xfs_dir2_sf_replace);
  1857. DEFINE_DIR2_EVENT(xfs_dir2_sf_removename);
  1858. DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4);
  1859. DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8);
  1860. DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block);
  1861. DEFINE_DIR2_EVENT(xfs_dir2_block_addname);
  1862. DEFINE_DIR2_EVENT(xfs_dir2_block_lookup);
  1863. DEFINE_DIR2_EVENT(xfs_dir2_block_replace);
  1864. DEFINE_DIR2_EVENT(xfs_dir2_block_removename);
  1865. DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf);
  1866. DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf);
  1867. DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname);
  1868. DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup);
  1869. DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace);
  1870. DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename);
  1871. DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block);
  1872. DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node);
  1873. DEFINE_DIR2_EVENT(xfs_dir2_node_addname);
  1874. DEFINE_DIR2_EVENT(xfs_dir2_node_lookup);
  1875. DEFINE_DIR2_EVENT(xfs_dir2_node_replace);
  1876. DEFINE_DIR2_EVENT(xfs_dir2_node_removename);
  1877. DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf);
  1878. DECLARE_EVENT_CLASS(xfs_attr_class,
  1879. TP_PROTO(struct xfs_da_args *args),
  1880. TP_ARGS(args),
  1881. TP_STRUCT__entry(
  1882. __field(dev_t, dev)
  1883. __field(xfs_ino_t, ino)
  1884. __dynamic_array(char, name, args->namelen)
  1885. __field(int, namelen)
  1886. __field(int, valuelen)
  1887. __field(xfs_dahash_t, hashval)
  1888. __field(unsigned int, attr_filter)
  1889. __field(unsigned int, attr_flags)
  1890. __field(uint32_t, op_flags)
  1891. ),
  1892. TP_fast_assign(
  1893. __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
  1894. __entry->ino = args->dp->i_ino;
  1895. if (args->namelen)
  1896. memcpy(__get_str(name), args->name, args->namelen);
  1897. __entry->namelen = args->namelen;
  1898. __entry->valuelen = args->valuelen;
  1899. __entry->hashval = args->hashval;
  1900. __entry->attr_filter = args->attr_filter;
  1901. __entry->attr_flags = args->attr_flags;
  1902. __entry->op_flags = args->op_flags;
  1903. ),
  1904. TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d valuelen %d "
  1905. "hashval 0x%x filter %s flags %s op_flags %s",
  1906. MAJOR(__entry->dev), MINOR(__entry->dev),
  1907. __entry->ino,
  1908. __entry->namelen,
  1909. __entry->namelen ? __get_str(name) : NULL,
  1910. __entry->namelen,
  1911. __entry->valuelen,
  1912. __entry->hashval,
  1913. __print_flags(__entry->attr_filter, "|",
  1914. XFS_ATTR_FILTER_FLAGS),
  1915. __print_flags(__entry->attr_flags, "|",
  1916. { XATTR_CREATE, "CREATE" },
  1917. { XATTR_REPLACE, "REPLACE" }),
  1918. __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
  1919. )
  1920. #define DEFINE_ATTR_EVENT(name) \
  1921. DEFINE_EVENT(xfs_attr_class, name, \
  1922. TP_PROTO(struct xfs_da_args *args), \
  1923. TP_ARGS(args))
  1924. DEFINE_ATTR_EVENT(xfs_attr_sf_add);
  1925. DEFINE_ATTR_EVENT(xfs_attr_sf_addname);
  1926. DEFINE_ATTR_EVENT(xfs_attr_sf_create);
  1927. DEFINE_ATTR_EVENT(xfs_attr_sf_lookup);
  1928. DEFINE_ATTR_EVENT(xfs_attr_sf_remove);
  1929. DEFINE_ATTR_EVENT(xfs_attr_sf_to_leaf);
  1930. DEFINE_ATTR_EVENT(xfs_attr_leaf_add);
  1931. DEFINE_ATTR_EVENT(xfs_attr_leaf_add_old);
  1932. DEFINE_ATTR_EVENT(xfs_attr_leaf_add_new);
  1933. DEFINE_ATTR_EVENT(xfs_attr_leaf_add_work);
  1934. DEFINE_ATTR_EVENT(xfs_attr_leaf_create);
  1935. DEFINE_ATTR_EVENT(xfs_attr_leaf_compact);
  1936. DEFINE_ATTR_EVENT(xfs_attr_leaf_get);
  1937. DEFINE_ATTR_EVENT(xfs_attr_leaf_lookup);
  1938. DEFINE_ATTR_EVENT(xfs_attr_leaf_replace);
  1939. DEFINE_ATTR_EVENT(xfs_attr_leaf_remove);
  1940. DEFINE_ATTR_EVENT(xfs_attr_leaf_removename);
  1941. DEFINE_ATTR_EVENT(xfs_attr_leaf_split);
  1942. DEFINE_ATTR_EVENT(xfs_attr_leaf_split_before);
  1943. DEFINE_ATTR_EVENT(xfs_attr_leaf_split_after);
  1944. DEFINE_ATTR_EVENT(xfs_attr_leaf_clearflag);
  1945. DEFINE_ATTR_EVENT(xfs_attr_leaf_setflag);
  1946. DEFINE_ATTR_EVENT(xfs_attr_leaf_flipflags);
  1947. DEFINE_ATTR_EVENT(xfs_attr_leaf_to_sf);
  1948. DEFINE_ATTR_EVENT(xfs_attr_leaf_to_node);
  1949. DEFINE_ATTR_EVENT(xfs_attr_leaf_rebalance);
  1950. DEFINE_ATTR_EVENT(xfs_attr_leaf_unbalance);
  1951. DEFINE_ATTR_EVENT(xfs_attr_leaf_toosmall);
  1952. DEFINE_ATTR_EVENT(xfs_attr_node_addname);
  1953. DEFINE_ATTR_EVENT(xfs_attr_node_get);
  1954. DEFINE_ATTR_EVENT(xfs_attr_node_replace);
  1955. DEFINE_ATTR_EVENT(xfs_attr_node_removename);
  1956. DEFINE_ATTR_EVENT(xfs_attr_fillstate);
  1957. DEFINE_ATTR_EVENT(xfs_attr_refillstate);
  1958. DEFINE_ATTR_EVENT(xfs_attr_rmtval_get);
  1959. DEFINE_ATTR_EVENT(xfs_attr_rmtval_set);
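/*
 * Both the directory (xfs_da_class) and extended attribute (xfs_attr_class)
 * events consume a struct xfs_da_args; the attr variant additionally records
 * the value length and attr flags. Illustrative hook invocation:
 *
 *	trace_xfs_attr_leaf_lookup(args);
 */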
  1960. #define DEFINE_DA_EVENT(name) \
  1961. DEFINE_EVENT(xfs_da_class, name, \
  1962. TP_PROTO(struct xfs_da_args *args), \
  1963. TP_ARGS(args))
  1964. DEFINE_DA_EVENT(xfs_da_split);
  1965. DEFINE_DA_EVENT(xfs_da_join);
  1966. DEFINE_DA_EVENT(xfs_da_link_before);
  1967. DEFINE_DA_EVENT(xfs_da_link_after);
  1968. DEFINE_DA_EVENT(xfs_da_unlink_back);
  1969. DEFINE_DA_EVENT(xfs_da_unlink_forward);
  1970. DEFINE_DA_EVENT(xfs_da_root_split);
  1971. DEFINE_DA_EVENT(xfs_da_root_join);
  1972. DEFINE_DA_EVENT(xfs_da_node_add);
  1973. DEFINE_DA_EVENT(xfs_da_node_create);
  1974. DEFINE_DA_EVENT(xfs_da_node_split);
  1975. DEFINE_DA_EVENT(xfs_da_node_remove);
  1976. DEFINE_DA_EVENT(xfs_da_node_rebalance);
  1977. DEFINE_DA_EVENT(xfs_da_node_unbalance);
  1978. DEFINE_DA_EVENT(xfs_da_node_toosmall);
  1979. DEFINE_DA_EVENT(xfs_da_swap_lastblock);
  1980. DEFINE_DA_EVENT(xfs_da_grow_inode);
  1981. DEFINE_DA_EVENT(xfs_da_shrink_inode);
  1982. DEFINE_DA_EVENT(xfs_da_fixhashpath);
  1983. DEFINE_DA_EVENT(xfs_da_path_shift);
  1984. DECLARE_EVENT_CLASS(xfs_dir2_space_class,
  1985. TP_PROTO(struct xfs_da_args *args, int idx),
  1986. TP_ARGS(args, idx),
  1987. TP_STRUCT__entry(
  1988. __field(dev_t, dev)
  1989. __field(xfs_ino_t, ino)
  1990. __field(uint32_t, op_flags)
  1991. __field(int, idx)
  1992. ),
  1993. TP_fast_assign(
  1994. __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
  1995. __entry->ino = args->dp->i_ino;
  1996. __entry->op_flags = args->op_flags;
  1997. __entry->idx = idx;
  1998. ),
  1999. TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d",
  2000. MAJOR(__entry->dev), MINOR(__entry->dev),
  2001. __entry->ino,
  2002. __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
  2003. __entry->idx)
  2004. )
  2005. #define DEFINE_DIR2_SPACE_EVENT(name) \
  2006. DEFINE_EVENT(xfs_dir2_space_class, name, \
  2007. TP_PROTO(struct xfs_da_args *args, int idx), \
  2008. TP_ARGS(args, idx))
  2009. DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add);
  2010. DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove);
  2011. DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode);
  2012. DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode);
  2013. TRACE_EVENT(xfs_dir2_leafn_moveents,
  2014. TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
  2015. TP_ARGS(args, src_idx, dst_idx, count),
  2016. TP_STRUCT__entry(
  2017. __field(dev_t, dev)
  2018. __field(xfs_ino_t, ino)
  2019. __field(uint32_t, op_flags)
  2020. __field(int, src_idx)
  2021. __field(int, dst_idx)
  2022. __field(int, count)
  2023. ),
  2024. TP_fast_assign(
  2025. __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
  2026. __entry->ino = args->dp->i_ino;
  2027. __entry->op_flags = args->op_flags;
  2028. __entry->src_idx = src_idx;
  2029. __entry->dst_idx = dst_idx;
  2030. __entry->count = count;
  2031. ),
  2032. TP_printk("dev %d:%d ino 0x%llx op_flags %s "
  2033. "src_idx %d dst_idx %d count %d",
  2034. MAJOR(__entry->dev), MINOR(__entry->dev),
  2035. __entry->ino,
  2036. __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
  2037. __entry->src_idx,
  2038. __entry->dst_idx,
  2039. __entry->count)
  2040. );
  2041. #define XFS_SWAPEXT_INODES \
  2042. { 0, "target" }, \
  2043. { 1, "temp" }
  2044. TRACE_DEFINE_ENUM(XFS_DINODE_FMT_DEV);
  2045. TRACE_DEFINE_ENUM(XFS_DINODE_FMT_LOCAL);
  2046. TRACE_DEFINE_ENUM(XFS_DINODE_FMT_EXTENTS);
  2047. TRACE_DEFINE_ENUM(XFS_DINODE_FMT_BTREE);
  2048. TRACE_DEFINE_ENUM(XFS_DINODE_FMT_UUID);
  2049. DECLARE_EVENT_CLASS(xfs_swap_extent_class,
  2050. TP_PROTO(struct xfs_inode *ip, int which),
  2051. TP_ARGS(ip, which),
  2052. TP_STRUCT__entry(
  2053. __field(dev_t, dev)
  2054. __field(int, which)
  2055. __field(xfs_ino_t, ino)
  2056. __field(int, format)
  2057. __field(xfs_extnum_t, nex)
  2058. __field(int, broot_size)
  2059. __field(int, fork_off)
  2060. ),
  2061. TP_fast_assign(
  2062. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  2063. __entry->which = which;
  2064. __entry->ino = ip->i_ino;
  2065. __entry->format = ip->i_df.if_format;
  2066. __entry->nex = ip->i_df.if_nextents;
  2067. __entry->broot_size = ip->i_df.if_broot_bytes;
  2068. __entry->fork_off = xfs_inode_fork_boff(ip);
  2069. ),
  2070. TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %llu, "
  2071. "broot size %d, forkoff 0x%x",
  2072. MAJOR(__entry->dev), MINOR(__entry->dev),
  2073. __entry->ino,
  2074. __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
  2075. __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
  2076. __entry->nex,
  2077. __entry->broot_size,
  2078. __entry->fork_off)
  2079. )
  2080. #define DEFINE_SWAPEXT_EVENT(name) \
  2081. DEFINE_EVENT(xfs_swap_extent_class, name, \
  2082. TP_PROTO(struct xfs_inode *ip, int which), \
  2083. TP_ARGS(ip, which))
  2084. DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
  2085. DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
  2086. TRACE_EVENT(xfs_log_recover,
  2087. TP_PROTO(struct xlog *log, xfs_daddr_t headblk, xfs_daddr_t tailblk),
  2088. TP_ARGS(log, headblk, tailblk),
  2089. TP_STRUCT__entry(
  2090. __field(dev_t, dev)
  2091. __field(xfs_daddr_t, headblk)
  2092. __field(xfs_daddr_t, tailblk)
  2093. ),
  2094. TP_fast_assign(
  2095. __entry->dev = log->l_mp->m_super->s_dev;
  2096. __entry->headblk = headblk;
  2097. __entry->tailblk = tailblk;
  2098. ),
  2099. TP_printk("dev %d:%d headblk 0x%llx tailblk 0x%llx",
  2100. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->headblk,
  2101. __entry->tailblk)
  2102. )
  2103. TRACE_EVENT(xfs_log_recover_record,
  2104. TP_PROTO(struct xlog *log, struct xlog_rec_header *rhead, int pass),
  2105. TP_ARGS(log, rhead, pass),
  2106. TP_STRUCT__entry(
  2107. __field(dev_t, dev)
  2108. __field(xfs_lsn_t, lsn)
  2109. __field(int, len)
  2110. __field(int, num_logops)
  2111. __field(int, pass)
  2112. ),
  2113. TP_fast_assign(
  2114. __entry->dev = log->l_mp->m_super->s_dev;
  2115. __entry->lsn = be64_to_cpu(rhead->h_lsn);
  2116. __entry->len = be32_to_cpu(rhead->h_len);
  2117. __entry->num_logops = be32_to_cpu(rhead->h_num_logops);
  2118. __entry->pass = pass;
  2119. ),
  2120. TP_printk("dev %d:%d lsn 0x%llx len 0x%x num_logops 0x%x pass %d",
  2121. MAJOR(__entry->dev), MINOR(__entry->dev),
  2122. __entry->lsn, __entry->len, __entry->num_logops,
  2123. __entry->pass)
  2124. )
  2125. DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
  2126. TP_PROTO(struct xlog *log, struct xlog_recover *trans,
  2127. struct xlog_recover_item *item, int pass),
  2128. TP_ARGS(log, trans, item, pass),
  2129. TP_STRUCT__entry(
  2130. __field(dev_t, dev)
  2131. __field(unsigned long, item)
  2132. __field(xlog_tid_t, tid)
  2133. __field(xfs_lsn_t, lsn)
  2134. __field(int, type)
  2135. __field(int, pass)
  2136. __field(int, count)
  2137. __field(int, total)
  2138. ),
  2139. TP_fast_assign(
  2140. __entry->dev = log->l_mp->m_super->s_dev;
  2141. __entry->item = (unsigned long)item;
  2142. __entry->tid = trans->r_log_tid;
  2143. __entry->lsn = trans->r_lsn;
  2144. __entry->type = ITEM_TYPE(item);
  2145. __entry->pass = pass;
  2146. __entry->count = item->ri_cnt;
  2147. __entry->total = item->ri_total;
  2148. ),
  2149. TP_printk("dev %d:%d tid 0x%x lsn 0x%llx, pass %d, item %p, "
  2150. "item type %s item region count/total %d/%d",
  2151. MAJOR(__entry->dev), MINOR(__entry->dev),
  2152. __entry->tid,
  2153. __entry->lsn,
  2154. __entry->pass,
  2155. (void *)__entry->item,
  2156. __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
  2157. __entry->count,
  2158. __entry->total)
  2159. )
  2160. #define DEFINE_LOG_RECOVER_ITEM(name) \
  2161. DEFINE_EVENT(xfs_log_recover_item_class, name, \
  2162. TP_PROTO(struct xlog *log, struct xlog_recover *trans, \
  2163. struct xlog_recover_item *item, int pass), \
  2164. TP_ARGS(log, trans, item, pass))
  2165. DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add);
  2166. DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont);
  2167. DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head);
  2168. DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
  2169. DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
  2170. DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
  2171. TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f),
  2172. TP_ARGS(log, buf_f),
  2173. TP_STRUCT__entry(
  2174. __field(dev_t, dev)
  2175. __field(int64_t, blkno)
  2176. __field(unsigned short, len)
  2177. __field(unsigned short, flags)
  2178. __field(unsigned short, size)
  2179. __field(unsigned int, map_size)
  2180. ),
  2181. TP_fast_assign(
  2182. __entry->dev = log->l_mp->m_super->s_dev;
  2183. __entry->blkno = buf_f->blf_blkno;
  2184. __entry->len = buf_f->blf_len;
  2185. __entry->flags = buf_f->blf_flags;
  2186. __entry->size = buf_f->blf_size;
  2187. __entry->map_size = buf_f->blf_map_size;
  2188. ),
  2189. TP_printk("dev %d:%d daddr 0x%llx, bbcount 0x%x, flags 0x%x, size %d, "
  2190. "map_size %d",
  2191. MAJOR(__entry->dev), MINOR(__entry->dev),
  2192. __entry->blkno,
  2193. __entry->len,
  2194. __entry->flags,
  2195. __entry->size,
  2196. __entry->map_size)
  2197. )
  2198. #define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
  2199. DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
  2200. TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \
  2201. TP_ARGS(log, buf_f))
  2202. DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
  2203. DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel);
  2204. DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add);
  2205. DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc);
  2206. DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover);
  2207. DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_skip);
  2208. DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf);
  2209. DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
  2210. DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
  2211. DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
  2212. TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f),
  2213. TP_ARGS(log, in_f),
  2214. TP_STRUCT__entry(
  2215. __field(dev_t, dev)
  2216. __field(xfs_ino_t, ino)
  2217. __field(unsigned short, size)
  2218. __field(int, fields)
  2219. __field(unsigned short, asize)
  2220. __field(unsigned short, dsize)
  2221. __field(int64_t, blkno)
  2222. __field(int, len)
  2223. __field(int, boffset)
  2224. ),
  2225. TP_fast_assign(
  2226. __entry->dev = log->l_mp->m_super->s_dev;
  2227. __entry->ino = in_f->ilf_ino;
  2228. __entry->size = in_f->ilf_size;
  2229. __entry->fields = in_f->ilf_fields;
  2230. __entry->asize = in_f->ilf_asize;
  2231. __entry->dsize = in_f->ilf_dsize;
  2232. __entry->blkno = in_f->ilf_blkno;
  2233. __entry->len = in_f->ilf_len;
  2234. __entry->boffset = in_f->ilf_boffset;
  2235. ),
  2236. TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, "
  2237. "dsize %d, daddr 0x%llx, bbcount 0x%x, boffset %d",
  2238. MAJOR(__entry->dev), MINOR(__entry->dev),
  2239. __entry->ino,
  2240. __entry->size,
  2241. __entry->fields,
  2242. __entry->asize,
  2243. __entry->dsize,
  2244. __entry->blkno,
  2245. __entry->len,
  2246. __entry->boffset)
  2247. )
  2248. #define DEFINE_LOG_RECOVER_INO_ITEM(name) \
  2249. DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
  2250. TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \
  2251. TP_ARGS(log, in_f))
  2252. DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
  2253. DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
  2254. DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
  2255. DECLARE_EVENT_CLASS(xfs_log_recover_icreate_item_class,
  2256. TP_PROTO(struct xlog *log, struct xfs_icreate_log *in_f),
  2257. TP_ARGS(log, in_f),
  2258. TP_STRUCT__entry(
  2259. __field(dev_t, dev)
  2260. __field(xfs_agnumber_t, agno)
  2261. __field(xfs_agblock_t, agbno)
  2262. __field(unsigned int, count)
  2263. __field(unsigned int, isize)
  2264. __field(xfs_agblock_t, length)
  2265. __field(unsigned int, gen)
  2266. ),
  2267. TP_fast_assign(
  2268. __entry->dev = log->l_mp->m_super->s_dev;
  2269. __entry->agno = be32_to_cpu(in_f->icl_ag);
  2270. __entry->agbno = be32_to_cpu(in_f->icl_agbno);
  2271. __entry->count = be32_to_cpu(in_f->icl_count);
  2272. __entry->isize = be32_to_cpu(in_f->icl_isize);
  2273. __entry->length = be32_to_cpu(in_f->icl_length);
  2274. __entry->gen = be32_to_cpu(in_f->icl_gen);
  2275. ),
  2276. TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x ireccount %u isize %u gen 0x%x",
  2277. MAJOR(__entry->dev), MINOR(__entry->dev),
  2278. __entry->agno,
  2279. __entry->agbno,
  2280. __entry->length,
  2281. __entry->count,
  2282. __entry->isize,
  2283. __entry->gen)
  2284. )
  2285. #define DEFINE_LOG_RECOVER_ICREATE_ITEM(name) \
  2286. DEFINE_EVENT(xfs_log_recover_icreate_item_class, name, \
  2287. TP_PROTO(struct xlog *log, struct xfs_icreate_log *in_f), \
  2288. TP_ARGS(log, in_f))
  2289. DEFINE_LOG_RECOVER_ICREATE_ITEM(xfs_log_recover_icreate_cancel);
  2290. DEFINE_LOG_RECOVER_ICREATE_ITEM(xfs_log_recover_icreate_recover);
  2291. DECLARE_EVENT_CLASS(xfs_discard_class,
  2292. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2293. xfs_agblock_t agbno, xfs_extlen_t len),
  2294. TP_ARGS(mp, agno, agbno, len),
  2295. TP_STRUCT__entry(
  2296. __field(dev_t, dev)
  2297. __field(xfs_agnumber_t, agno)
  2298. __field(xfs_agblock_t, agbno)
  2299. __field(xfs_extlen_t, len)
  2300. ),
  2301. TP_fast_assign(
  2302. __entry->dev = mp->m_super->s_dev;
  2303. __entry->agno = agno;
  2304. __entry->agbno = agbno;
  2305. __entry->len = len;
  2306. ),
  2307. TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
  2308. MAJOR(__entry->dev), MINOR(__entry->dev),
  2309. __entry->agno,
  2310. __entry->agbno,
  2311. __entry->len)
  2312. )
  2313. #define DEFINE_DISCARD_EVENT(name) \
  2314. DEFINE_EVENT(xfs_discard_class, name, \
  2315. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2316. xfs_agblock_t agbno, xfs_extlen_t len), \
  2317. TP_ARGS(mp, agno, agbno, len))
  2318. DEFINE_DISCARD_EVENT(xfs_discard_extent);
  2319. DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
  2320. DEFINE_DISCARD_EVENT(xfs_discard_exclude);
  2321. DEFINE_DISCARD_EVENT(xfs_discard_busy);
  2322. /* btree cursor events */
  2323. TRACE_DEFINE_ENUM(XFS_BTNUM_BNOi);
  2324. TRACE_DEFINE_ENUM(XFS_BTNUM_CNTi);
  2325. TRACE_DEFINE_ENUM(XFS_BTNUM_BMAPi);
  2326. TRACE_DEFINE_ENUM(XFS_BTNUM_INOi);
  2327. TRACE_DEFINE_ENUM(XFS_BTNUM_FINOi);
  2328. TRACE_DEFINE_ENUM(XFS_BTNUM_RMAPi);
  2329. TRACE_DEFINE_ENUM(XFS_BTNUM_REFCi);
  2330. DECLARE_EVENT_CLASS(xfs_btree_cur_class,
  2331. TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp),
  2332. TP_ARGS(cur, level, bp),
  2333. TP_STRUCT__entry(
  2334. __field(dev_t, dev)
  2335. __field(xfs_btnum_t, btnum)
  2336. __field(int, level)
  2337. __field(int, nlevels)
  2338. __field(int, ptr)
  2339. __field(xfs_daddr_t, daddr)
  2340. ),
  2341. TP_fast_assign(
  2342. __entry->dev = cur->bc_mp->m_super->s_dev;
  2343. __entry->btnum = cur->bc_btnum;
  2344. __entry->level = level;
  2345. __entry->nlevels = cur->bc_nlevels;
  2346. __entry->ptr = cur->bc_levels[level].ptr;
  2347. __entry->daddr = bp ? xfs_buf_daddr(bp) : -1;
  2348. ),
  2349. TP_printk("dev %d:%d btree %s level %d/%d ptr %d daddr 0x%llx",
  2350. MAJOR(__entry->dev), MINOR(__entry->dev),
  2351. __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
  2352. __entry->level,
  2353. __entry->nlevels,
  2354. __entry->ptr,
  2355. (unsigned long long)__entry->daddr)
  2356. )
  2357. #define DEFINE_BTREE_CUR_EVENT(name) \
  2358. DEFINE_EVENT(xfs_btree_cur_class, name, \
  2359. TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp), \
  2360. TP_ARGS(cur, level, bp))
  2361. DEFINE_BTREE_CUR_EVENT(xfs_btree_updkeys);
  2362. DEFINE_BTREE_CUR_EVENT(xfs_btree_overlapped_query_range);
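/*
 * Sketch for the btree cursor events above: bp may be NULL, in which case the
 * class records a daddr of -1, e.g.
 *
 *	trace_xfs_btree_updkeys(cur, level, bp);
 */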
  2363. /* deferred ops */
  2364. struct xfs_defer_pending;
  2365. DECLARE_EVENT_CLASS(xfs_defer_class,
  2366. TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip),
  2367. TP_ARGS(tp, caller_ip),
  2368. TP_STRUCT__entry(
  2369. __field(dev_t, dev)
  2370. __field(struct xfs_trans *, tp)
  2371. __field(char, committed)
  2372. __field(unsigned long, caller_ip)
  2373. ),
  2374. TP_fast_assign(
  2375. __entry->dev = tp->t_mountp->m_super->s_dev;
  2376. __entry->tp = tp;
  2377. __entry->caller_ip = caller_ip;
  2378. ),
  2379. TP_printk("dev %d:%d tp %p caller %pS",
  2380. MAJOR(__entry->dev), MINOR(__entry->dev),
  2381. __entry->tp,
  2382. (char *)__entry->caller_ip)
  2383. )
  2384. #define DEFINE_DEFER_EVENT(name) \
  2385. DEFINE_EVENT(xfs_defer_class, name, \
  2386. TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), \
  2387. TP_ARGS(tp, caller_ip))
  2388. DECLARE_EVENT_CLASS(xfs_defer_error_class,
  2389. TP_PROTO(struct xfs_trans *tp, int error),
  2390. TP_ARGS(tp, error),
  2391. TP_STRUCT__entry(
  2392. __field(dev_t, dev)
  2393. __field(struct xfs_trans *, tp)
  2394. __field(char, committed)
  2395. __field(int, error)
  2396. ),
  2397. TP_fast_assign(
  2398. __entry->dev = tp->t_mountp->m_super->s_dev;
  2399. __entry->tp = tp;
  2400. __entry->error = error;
  2401. ),
  2402. TP_printk("dev %d:%d tp %p err %d",
  2403. MAJOR(__entry->dev), MINOR(__entry->dev),
  2404. __entry->tp,
  2405. __entry->error)
  2406. )
  2407. #define DEFINE_DEFER_ERROR_EVENT(name) \
  2408. DEFINE_EVENT(xfs_defer_error_class, name, \
  2409. TP_PROTO(struct xfs_trans *tp, int error), \
  2410. TP_ARGS(tp, error))
  2411. DECLARE_EVENT_CLASS(xfs_defer_pending_class,
  2412. TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp),
  2413. TP_ARGS(mp, dfp),
  2414. TP_STRUCT__entry(
  2415. __field(dev_t, dev)
  2416. __field(int, type)
  2417. __field(void *, intent)
  2418. __field(char, committed)
  2419. __field(int, nr)
  2420. ),
  2421. TP_fast_assign(
  2422. __entry->dev = mp ? mp->m_super->s_dev : 0;
  2423. __entry->type = dfp->dfp_type;
  2424. __entry->intent = dfp->dfp_intent;
  2425. __entry->committed = dfp->dfp_done != NULL;
  2426. __entry->nr = dfp->dfp_count;
  2427. ),
  2428. TP_printk("dev %d:%d optype %d intent %p committed %d nr %d",
  2429. MAJOR(__entry->dev), MINOR(__entry->dev),
  2430. __entry->type,
  2431. __entry->intent,
  2432. __entry->committed,
  2433. __entry->nr)
  2434. )
  2435. #define DEFINE_DEFER_PENDING_EVENT(name) \
  2436. DEFINE_EVENT(xfs_defer_pending_class, name, \
  2437. TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp), \
  2438. TP_ARGS(mp, dfp))
  2439. DECLARE_EVENT_CLASS(xfs_phys_extent_deferred_class,
  2440. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2441. int type, xfs_agblock_t agbno, xfs_extlen_t len),
  2442. TP_ARGS(mp, agno, type, agbno, len),
  2443. TP_STRUCT__entry(
  2444. __field(dev_t, dev)
  2445. __field(xfs_agnumber_t, agno)
  2446. __field(int, type)
  2447. __field(xfs_agblock_t, agbno)
  2448. __field(xfs_extlen_t, len)
  2449. ),
  2450. TP_fast_assign(
  2451. __entry->dev = mp->m_super->s_dev;
  2452. __entry->agno = agno;
  2453. __entry->type = type;
  2454. __entry->agbno = agbno;
  2455. __entry->len = len;
  2456. ),
  2457. TP_printk("dev %d:%d op %d agno 0x%x agbno 0x%x fsbcount 0x%x",
  2458. MAJOR(__entry->dev), MINOR(__entry->dev),
  2459. __entry->type,
  2460. __entry->agno,
  2461. __entry->agbno,
  2462. __entry->len)
  2463. );
  2464. #define DEFINE_PHYS_EXTENT_DEFERRED_EVENT(name) \
  2465. DEFINE_EVENT(xfs_phys_extent_deferred_class, name, \
  2466. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2467. int type, \
  2468. xfs_agblock_t bno, \
  2469. xfs_extlen_t len), \
  2470. TP_ARGS(mp, agno, type, bno, len))
  2471. DECLARE_EVENT_CLASS(xfs_map_extent_deferred_class,
  2472. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2473. int op,
  2474. xfs_agblock_t agbno,
  2475. xfs_ino_t ino,
  2476. int whichfork,
  2477. xfs_fileoff_t offset,
  2478. xfs_filblks_t len,
  2479. xfs_exntst_t state),
  2480. TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state),
  2481. TP_STRUCT__entry(
  2482. __field(dev_t, dev)
  2483. __field(xfs_agnumber_t, agno)
  2484. __field(xfs_ino_t, ino)
  2485. __field(xfs_agblock_t, agbno)
  2486. __field(int, whichfork)
  2487. __field(xfs_fileoff_t, l_loff)
  2488. __field(xfs_filblks_t, l_len)
  2489. __field(xfs_exntst_t, l_state)
  2490. __field(int, op)
  2491. ),
  2492. TP_fast_assign(
  2493. __entry->dev = mp->m_super->s_dev;
  2494. __entry->agno = agno;
  2495. __entry->ino = ino;
  2496. __entry->agbno = agbno;
  2497. __entry->whichfork = whichfork;
  2498. __entry->l_loff = offset;
  2499. __entry->l_len = len;
  2500. __entry->l_state = state;
  2501. __entry->op = op;
  2502. ),
  2503. TP_printk("dev %d:%d op %d agno 0x%x agbno 0x%x owner 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d",
  2504. MAJOR(__entry->dev), MINOR(__entry->dev),
  2505. __entry->op,
  2506. __entry->agno,
  2507. __entry->agbno,
  2508. __entry->ino,
  2509. __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
  2510. __entry->l_loff,
  2511. __entry->l_len,
  2512. __entry->l_state)
  2513. );
  2514. #define DEFINE_MAP_EXTENT_DEFERRED_EVENT(name) \
  2515. DEFINE_EVENT(xfs_map_extent_deferred_class, name, \
  2516. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2517. int op, \
  2518. xfs_agblock_t agbno, \
  2519. xfs_ino_t ino, \
  2520. int whichfork, \
  2521. xfs_fileoff_t offset, \
  2522. xfs_filblks_t len, \
  2523. xfs_exntst_t state), \
  2524. TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state))
  2525. DEFINE_DEFER_EVENT(xfs_defer_cancel);
  2526. DEFINE_DEFER_EVENT(xfs_defer_trans_roll);
  2527. DEFINE_DEFER_EVENT(xfs_defer_trans_abort);
  2528. DEFINE_DEFER_EVENT(xfs_defer_finish);
  2529. DEFINE_DEFER_EVENT(xfs_defer_finish_done);
  2530. DEFINE_DEFER_ERROR_EVENT(xfs_defer_trans_roll_error);
  2531. DEFINE_DEFER_ERROR_EVENT(xfs_defer_finish_error);
  2532. DEFINE_DEFER_PENDING_EVENT(xfs_defer_create_intent);
  2533. DEFINE_DEFER_PENDING_EVENT(xfs_defer_cancel_list);
  2534. DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_finish);
  2535. DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_abort);
  2536. DEFINE_DEFER_PENDING_EVENT(xfs_defer_relog_intent);
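/*
 * Illustrative sketch, not part of the upstream header: each DEFINE_EVENT()
 * instance above expands to a trace_<name>() helper that takes the class
 * prototype, so the deferred-ops code would emit one of these events with a
 * call along the lines of:
 *
 *	trace_xfs_defer_create_intent(mp, dfp);
 *
 * The event only fires once it has been enabled at runtime, typically via
 * tracefs (events/xfs/xfs_defer_create_intent/enable).
 */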
  2537. #define DEFINE_BMAP_FREE_DEFERRED_EVENT DEFINE_PHYS_EXTENT_DEFERRED_EVENT
  2538. DEFINE_BMAP_FREE_DEFERRED_EVENT(xfs_bmap_free_defer);
  2539. DEFINE_BMAP_FREE_DEFERRED_EVENT(xfs_bmap_free_deferred);
  2540. DEFINE_BMAP_FREE_DEFERRED_EVENT(xfs_agfl_free_defer);
  2541. DEFINE_BMAP_FREE_DEFERRED_EVENT(xfs_agfl_free_deferred);
  2542. /* rmap tracepoints */
  2543. DECLARE_EVENT_CLASS(xfs_rmap_class,
  2544. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2545. xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten,
  2546. const struct xfs_owner_info *oinfo),
  2547. TP_ARGS(mp, agno, agbno, len, unwritten, oinfo),
  2548. TP_STRUCT__entry(
  2549. __field(dev_t, dev)
  2550. __field(xfs_agnumber_t, agno)
  2551. __field(xfs_agblock_t, agbno)
  2552. __field(xfs_extlen_t, len)
  2553. __field(uint64_t, owner)
  2554. __field(uint64_t, offset)
  2555. __field(unsigned long, flags)
  2556. ),
  2557. TP_fast_assign(
  2558. __entry->dev = mp->m_super->s_dev;
  2559. __entry->agno = agno;
  2560. __entry->agbno = agbno;
  2561. __entry->len = len;
  2562. __entry->owner = oinfo->oi_owner;
  2563. __entry->offset = oinfo->oi_offset;
  2564. __entry->flags = oinfo->oi_flags;
  2565. if (unwritten)
  2566. __entry->flags |= XFS_RMAP_UNWRITTEN;
  2567. ),
  2568. TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%lx",
  2569. MAJOR(__entry->dev), MINOR(__entry->dev),
  2570. __entry->agno,
  2571. __entry->agbno,
  2572. __entry->len,
  2573. __entry->owner,
  2574. __entry->offset,
  2575. __entry->flags)
  2576. );
  2577. #define DEFINE_RMAP_EVENT(name) \
  2578. DEFINE_EVENT(xfs_rmap_class, name, \
  2579. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2580. xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten, \
  2581. const struct xfs_owner_info *oinfo), \
  2582. TP_ARGS(mp, agno, agbno, len, unwritten, oinfo))
  2583. /* simple AG-based error/%ip tracepoint class */
  2584. DECLARE_EVENT_CLASS(xfs_ag_error_class,
  2585. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error,
  2586. unsigned long caller_ip),
  2587. TP_ARGS(mp, agno, error, caller_ip),
  2588. TP_STRUCT__entry(
  2589. __field(dev_t, dev)
  2590. __field(xfs_agnumber_t, agno)
  2591. __field(int, error)
  2592. __field(unsigned long, caller_ip)
  2593. ),
  2594. TP_fast_assign(
  2595. __entry->dev = mp->m_super->s_dev;
  2596. __entry->agno = agno;
  2597. __entry->error = error;
  2598. __entry->caller_ip = caller_ip;
  2599. ),
  2600. TP_printk("dev %d:%d agno 0x%x error %d caller %pS",
  2601. MAJOR(__entry->dev), MINOR(__entry->dev),
  2602. __entry->agno,
  2603. __entry->error,
  2604. (char *)__entry->caller_ip)
  2605. );
  2606. #define DEFINE_AG_ERROR_EVENT(name) \
  2607. DEFINE_EVENT(xfs_ag_error_class, name, \
  2608. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error, \
  2609. unsigned long caller_ip), \
  2610. TP_ARGS(mp, agno, error, caller_ip))
  2611. DEFINE_RMAP_EVENT(xfs_rmap_unmap);
  2612. DEFINE_RMAP_EVENT(xfs_rmap_unmap_done);
  2613. DEFINE_AG_ERROR_EVENT(xfs_rmap_unmap_error);
  2614. DEFINE_RMAP_EVENT(xfs_rmap_map);
  2615. DEFINE_RMAP_EVENT(xfs_rmap_map_done);
  2616. DEFINE_AG_ERROR_EVENT(xfs_rmap_map_error);
  2617. DEFINE_RMAP_EVENT(xfs_rmap_convert);
  2618. DEFINE_RMAP_EVENT(xfs_rmap_convert_done);
  2619. DEFINE_AG_ERROR_EVENT(xfs_rmap_convert_error);
  2620. DEFINE_AG_ERROR_EVENT(xfs_rmap_convert_state);
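/*
 * Illustrative sketch, not part of the upstream header: the rmap class takes
 * the owner description by pointer and copies the oi_* fields in
 * TP_fast_assign() above, so a hypothetical caller would look like:
 *
 *	struct xfs_owner_info	oinfo = {
 *		.oi_owner	= ip->i_ino,
 *		.oi_offset	= offset,
 *		.oi_flags	= 0,
 *	};
 *
 *	trace_xfs_rmap_map(mp, agno, agbno, len, false, &oinfo);
 *
 * ("ip", "offset", "agbno" and "len" are stand-ins for the caller's state.)
 */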
  2621. DECLARE_EVENT_CLASS(xfs_rmapbt_class,
  2622. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2623. xfs_agblock_t agbno, xfs_extlen_t len,
  2624. uint64_t owner, uint64_t offset, unsigned int flags),
  2625. TP_ARGS(mp, agno, agbno, len, owner, offset, flags),
  2626. TP_STRUCT__entry(
  2627. __field(dev_t, dev)
  2628. __field(xfs_agnumber_t, agno)
  2629. __field(xfs_agblock_t, agbno)
  2630. __field(xfs_extlen_t, len)
  2631. __field(uint64_t, owner)
  2632. __field(uint64_t, offset)
  2633. __field(unsigned int, flags)
  2634. ),
  2635. TP_fast_assign(
  2636. __entry->dev = mp->m_super->s_dev;
  2637. __entry->agno = agno;
  2638. __entry->agbno = agbno;
  2639. __entry->len = len;
  2640. __entry->owner = owner;
  2641. __entry->offset = offset;
  2642. __entry->flags = flags;
  2643. ),
  2644. TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
  2645. MAJOR(__entry->dev), MINOR(__entry->dev),
  2646. __entry->agno,
  2647. __entry->agbno,
  2648. __entry->len,
  2649. __entry->owner,
  2650. __entry->offset,
  2651. __entry->flags)
  2652. );
  2653. #define DEFINE_RMAPBT_EVENT(name) \
  2654. DEFINE_EVENT(xfs_rmapbt_class, name, \
  2655. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2656. xfs_agblock_t agbno, xfs_extlen_t len, \
  2657. uint64_t owner, uint64_t offset, unsigned int flags), \
  2658. TP_ARGS(mp, agno, agbno, len, owner, offset, flags))
  2659. #define DEFINE_RMAP_DEFERRED_EVENT DEFINE_MAP_EXTENT_DEFERRED_EVENT
  2660. DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_defer);
  2661. DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_deferred);
  2662. DEFINE_BUSY_EVENT(xfs_rmapbt_alloc_block);
  2663. DEFINE_BUSY_EVENT(xfs_rmapbt_free_block);
  2664. DEFINE_RMAPBT_EVENT(xfs_rmap_update);
  2665. DEFINE_RMAPBT_EVENT(xfs_rmap_insert);
  2666. DEFINE_RMAPBT_EVENT(xfs_rmap_delete);
  2667. DEFINE_AG_ERROR_EVENT(xfs_rmap_insert_error);
  2668. DEFINE_AG_ERROR_EVENT(xfs_rmap_delete_error);
  2669. DEFINE_AG_ERROR_EVENT(xfs_rmap_update_error);
  2670. DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_candidate);
  2671. DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_query);
  2672. DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_candidate);
  2673. DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range);
  2674. DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_result);
  2675. DEFINE_RMAPBT_EVENT(xfs_rmap_find_right_neighbor_result);
  2676. DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_result);
  2677. /* deferred bmbt updates */
  2678. #define DEFINE_BMAP_DEFERRED_EVENT DEFINE_RMAP_DEFERRED_EVENT
  2679. DEFINE_BMAP_DEFERRED_EVENT(xfs_bmap_defer);
  2680. DEFINE_BMAP_DEFERRED_EVENT(xfs_bmap_deferred);
  2681. /* per-AG reservation */
  2682. DECLARE_EVENT_CLASS(xfs_ag_resv_class,
  2683. TP_PROTO(struct xfs_perag *pag, enum xfs_ag_resv_type resv,
  2684. xfs_extlen_t len),
  2685. TP_ARGS(pag, resv, len),
  2686. TP_STRUCT__entry(
  2687. __field(dev_t, dev)
  2688. __field(xfs_agnumber_t, agno)
  2689. __field(int, resv)
  2690. __field(xfs_extlen_t, freeblks)
  2691. __field(xfs_extlen_t, flcount)
  2692. __field(xfs_extlen_t, reserved)
  2693. __field(xfs_extlen_t, asked)
  2694. __field(xfs_extlen_t, len)
  2695. ),
  2696. TP_fast_assign(
  2697. struct xfs_ag_resv *r = xfs_perag_resv(pag, resv);
  2698. __entry->dev = pag->pag_mount->m_super->s_dev;
  2699. __entry->agno = pag->pag_agno;
  2700. __entry->resv = resv;
  2701. __entry->freeblks = pag->pagf_freeblks;
  2702. __entry->flcount = pag->pagf_flcount;
  2703. __entry->reserved = r ? r->ar_reserved : 0;
  2704. __entry->asked = r ? r->ar_asked : 0;
  2705. __entry->len = len;
  2706. ),
  2707. TP_printk("dev %d:%d agno 0x%x resv %d freeblks %u flcount %u "
  2708. "resv %u ask %u len %u",
  2709. MAJOR(__entry->dev), MINOR(__entry->dev),
  2710. __entry->agno,
  2711. __entry->resv,
  2712. __entry->freeblks,
  2713. __entry->flcount,
  2714. __entry->reserved,
  2715. __entry->asked,
  2716. __entry->len)
  2717. )
  2718. #define DEFINE_AG_RESV_EVENT(name) \
  2719. DEFINE_EVENT(xfs_ag_resv_class, name, \
  2720. TP_PROTO(struct xfs_perag *pag, enum xfs_ag_resv_type type, \
  2721. xfs_extlen_t len), \
  2722. TP_ARGS(pag, type, len))
  2723. /* per-AG reservation tracepoints */
  2724. DEFINE_AG_RESV_EVENT(xfs_ag_resv_init);
  2725. DEFINE_AG_RESV_EVENT(xfs_ag_resv_free);
  2726. DEFINE_AG_RESV_EVENT(xfs_ag_resv_alloc_extent);
  2727. DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent);
  2728. DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical);
  2729. DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed);
  2730. DEFINE_AG_ERROR_EVENT(xfs_ag_resv_free_error);
  2731. DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
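/*
 * Illustrative sketch, not part of the upstream header: the per-AG
 * reservation events take the perag plus a reservation type and a length;
 * TP_fast_assign() above pulls the current counters out of the perag, so a
 * hypothetical caller only needs something like:
 *
 *	trace_xfs_ag_resv_alloc_extent(pag, XFS_AG_RESV_METADATA, len);
 *
 * (XFS_AG_RESV_METADATA is one of the enum xfs_ag_resv_type values; "len"
 * stands in for the just-allocated extent length.)
 */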
  2732. /* refcount tracepoint classes */
  2733. /* reuse the discard trace class for agbno/aglen-based traces */
  2734. #define DEFINE_AG_EXTENT_EVENT(name) DEFINE_DISCARD_EVENT(name)
  2735. /* ag btree lookup tracepoint class */
  2736. TRACE_DEFINE_ENUM(XFS_LOOKUP_EQi);
  2737. TRACE_DEFINE_ENUM(XFS_LOOKUP_LEi);
  2738. TRACE_DEFINE_ENUM(XFS_LOOKUP_GEi);
  2739. DECLARE_EVENT_CLASS(xfs_ag_btree_lookup_class,
  2740. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2741. xfs_agblock_t agbno, xfs_lookup_t dir),
  2742. TP_ARGS(mp, agno, agbno, dir),
  2743. TP_STRUCT__entry(
  2744. __field(dev_t, dev)
  2745. __field(xfs_agnumber_t, agno)
  2746. __field(xfs_agblock_t, agbno)
  2747. __field(xfs_lookup_t, dir)
  2748. ),
  2749. TP_fast_assign(
  2750. __entry->dev = mp->m_super->s_dev;
  2751. __entry->agno = agno;
  2752. __entry->agbno = agbno;
  2753. __entry->dir = dir;
  2754. ),
  2755. TP_printk("dev %d:%d agno 0x%x agbno 0x%x cmp %s(%d)",
  2756. MAJOR(__entry->dev), MINOR(__entry->dev),
  2757. __entry->agno,
  2758. __entry->agbno,
  2759. __print_symbolic(__entry->dir, XFS_AG_BTREE_CMP_FORMAT_STR),
  2760. __entry->dir)
  2761. )
  2762. #define DEFINE_AG_BTREE_LOOKUP_EVENT(name) \
  2763. DEFINE_EVENT(xfs_ag_btree_lookup_class, name, \
  2764. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2765. xfs_agblock_t agbno, xfs_lookup_t dir), \
  2766. TP_ARGS(mp, agno, agbno, dir))
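/*
 * Illustrative sketch, not part of the upstream header: a btree lookup event
 * records which comparison mode was requested, and __print_symbolic() maps
 * the xfs_lookup_t value to a short mnemonic via XFS_AG_BTREE_CMP_FORMAT_STR.
 * A hypothetical call site (such as the refcount lookup defined below):
 *
 *	trace_xfs_refcount_lookup(mp, agno, agbno, XFS_LOOKUP_LE);
 */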
  2767. /* single-rcext tracepoint class */
  2768. DECLARE_EVENT_CLASS(xfs_refcount_extent_class,
  2769. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2770. struct xfs_refcount_irec *irec),
  2771. TP_ARGS(mp, agno, irec),
  2772. TP_STRUCT__entry(
  2773. __field(dev_t, dev)
  2774. __field(xfs_agnumber_t, agno)
  2775. __field(enum xfs_refc_domain, domain)
  2776. __field(xfs_agblock_t, startblock)
  2777. __field(xfs_extlen_t, blockcount)
  2778. __field(xfs_nlink_t, refcount)
  2779. ),
  2780. TP_fast_assign(
  2781. __entry->dev = mp->m_super->s_dev;
  2782. __entry->agno = agno;
  2783. __entry->domain = irec->rc_domain;
  2784. __entry->startblock = irec->rc_startblock;
  2785. __entry->blockcount = irec->rc_blockcount;
  2786. __entry->refcount = irec->rc_refcount;
  2787. ),
  2788. TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u",
  2789. MAJOR(__entry->dev), MINOR(__entry->dev),
  2790. __entry->agno,
  2791. __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS),
  2792. __entry->startblock,
  2793. __entry->blockcount,
  2794. __entry->refcount)
  2795. )
  2796. #define DEFINE_REFCOUNT_EXTENT_EVENT(name) \
  2797. DEFINE_EVENT(xfs_refcount_extent_class, name, \
  2798. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2799. struct xfs_refcount_irec *irec), \
  2800. TP_ARGS(mp, agno, irec))
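/*
 * Illustrative sketch, not part of the upstream header: these events dump a
 * single incore refcount record, so a hypothetical caller would pass along
 * whatever record it just read or is about to write:
 *
 *	struct xfs_refcount_irec	irec = {
 *		.rc_domain	= XFS_REFC_DOMAIN_SHARED,
 *		.rc_startblock	= agbno,
 *		.rc_blockcount	= len,
 *		.rc_refcount	= 1,
 *	};
 *
 *	trace_xfs_refcount_get(mp, agno, &irec);
 */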
  2801. /* single-rcext and an agbno tracepoint class */
  2802. DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class,
  2803. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2804. struct xfs_refcount_irec *irec, xfs_agblock_t agbno),
  2805. TP_ARGS(mp, agno, irec, agbno),
  2806. TP_STRUCT__entry(
  2807. __field(dev_t, dev)
  2808. __field(xfs_agnumber_t, agno)
  2809. __field(enum xfs_refc_domain, domain)
  2810. __field(xfs_agblock_t, startblock)
  2811. __field(xfs_extlen_t, blockcount)
  2812. __field(xfs_nlink_t, refcount)
  2813. __field(xfs_agblock_t, agbno)
  2814. ),
  2815. TP_fast_assign(
  2816. __entry->dev = mp->m_super->s_dev;
  2817. __entry->agno = agno;
  2818. __entry->domain = irec->rc_domain;
  2819. __entry->startblock = irec->rc_startblock;
  2820. __entry->blockcount = irec->rc_blockcount;
  2821. __entry->refcount = irec->rc_refcount;
  2822. __entry->agbno = agbno;
  2823. ),
  2824. TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x",
  2825. MAJOR(__entry->dev), MINOR(__entry->dev),
  2826. __entry->agno,
  2827. __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS),
  2828. __entry->startblock,
  2829. __entry->blockcount,
  2830. __entry->refcount,
  2831. __entry->agbno)
  2832. )
  2833. #define DEFINE_REFCOUNT_EXTENT_AT_EVENT(name) \
  2834. DEFINE_EVENT(xfs_refcount_extent_at_class, name, \
  2835. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2836. struct xfs_refcount_irec *irec, xfs_agblock_t agbno), \
  2837. TP_ARGS(mp, agno, irec, agbno))
  2838. /* double-rcext tracepoint class */
  2839. DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
  2840. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2841. struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2),
  2842. TP_ARGS(mp, agno, i1, i2),
  2843. TP_STRUCT__entry(
  2844. __field(dev_t, dev)
  2845. __field(xfs_agnumber_t, agno)
  2846. __field(enum xfs_refc_domain, i1_domain)
  2847. __field(xfs_agblock_t, i1_startblock)
  2848. __field(xfs_extlen_t, i1_blockcount)
  2849. __field(xfs_nlink_t, i1_refcount)
  2850. __field(enum xfs_refc_domain, i2_domain)
  2851. __field(xfs_agblock_t, i2_startblock)
  2852. __field(xfs_extlen_t, i2_blockcount)
  2853. __field(xfs_nlink_t, i2_refcount)
  2854. ),
  2855. TP_fast_assign(
  2856. __entry->dev = mp->m_super->s_dev;
  2857. __entry->agno = agno;
  2858. __entry->i1_domain = i1->rc_domain;
  2859. __entry->i1_startblock = i1->rc_startblock;
  2860. __entry->i1_blockcount = i1->rc_blockcount;
  2861. __entry->i1_refcount = i1->rc_refcount;
  2862. __entry->i2_domain = i2->rc_domain;
  2863. __entry->i2_startblock = i2->rc_startblock;
  2864. __entry->i2_blockcount = i2->rc_blockcount;
  2865. __entry->i2_refcount = i2->rc_refcount;
  2866. ),
  2867. TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
  2868. "dom %s agbno 0x%x fsbcount 0x%x refcount %u",
  2869. MAJOR(__entry->dev), MINOR(__entry->dev),
  2870. __entry->agno,
  2871. __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS),
  2872. __entry->i1_startblock,
  2873. __entry->i1_blockcount,
  2874. __entry->i1_refcount,
  2875. __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS),
  2876. __entry->i2_startblock,
  2877. __entry->i2_blockcount,
  2878. __entry->i2_refcount)
  2879. )
  2880. #define DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(name) \
  2881. DEFINE_EVENT(xfs_refcount_double_extent_class, name, \
  2882. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2883. struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2), \
  2884. TP_ARGS(mp, agno, i1, i2))
  2885. /* double-rcext and an agbno tracepoint class */
  2886. DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
  2887. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2888. struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2,
  2889. xfs_agblock_t agbno),
  2890. TP_ARGS(mp, agno, i1, i2, agbno),
  2891. TP_STRUCT__entry(
  2892. __field(dev_t, dev)
  2893. __field(xfs_agnumber_t, agno)
  2894. __field(enum xfs_refc_domain, i1_domain)
  2895. __field(xfs_agblock_t, i1_startblock)
  2896. __field(xfs_extlen_t, i1_blockcount)
  2897. __field(xfs_nlink_t, i1_refcount)
  2898. __field(enum xfs_refc_domain, i2_domain)
  2899. __field(xfs_agblock_t, i2_startblock)
  2900. __field(xfs_extlen_t, i2_blockcount)
  2901. __field(xfs_nlink_t, i2_refcount)
  2902. __field(xfs_agblock_t, agbno)
  2903. ),
  2904. TP_fast_assign(
  2905. __entry->dev = mp->m_super->s_dev;
  2906. __entry->agno = agno;
  2907. __entry->i1_domain = i1->rc_domain;
  2908. __entry->i1_startblock = i1->rc_startblock;
  2909. __entry->i1_blockcount = i1->rc_blockcount;
  2910. __entry->i1_refcount = i1->rc_refcount;
  2911. __entry->i2_domain = i2->rc_domain;
  2912. __entry->i2_startblock = i2->rc_startblock;
  2913. __entry->i2_blockcount = i2->rc_blockcount;
  2914. __entry->i2_refcount = i2->rc_refcount;
  2915. __entry->agbno = agbno;
  2916. ),
  2917. TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
  2918. "dom %s agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x",
  2919. MAJOR(__entry->dev), MINOR(__entry->dev),
  2920. __entry->agno,
  2921. __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS),
  2922. __entry->i1_startblock,
  2923. __entry->i1_blockcount,
  2924. __entry->i1_refcount,
  2925. __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS),
  2926. __entry->i2_startblock,
  2927. __entry->i2_blockcount,
  2928. __entry->i2_refcount,
  2929. __entry->agbno)
  2930. )
  2931. #define DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(name) \
  2932. DEFINE_EVENT(xfs_refcount_double_extent_at_class, name, \
  2933. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2934. struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2, \
  2935. xfs_agblock_t agbno), \
  2936. TP_ARGS(mp, agno, i1, i2, agbno))
  2937. /* triple-rcext tracepoint class */
  2938. DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
  2939. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  2940. struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2,
  2941. struct xfs_refcount_irec *i3),
  2942. TP_ARGS(mp, agno, i1, i2, i3),
  2943. TP_STRUCT__entry(
  2944. __field(dev_t, dev)
  2945. __field(xfs_agnumber_t, agno)
  2946. __field(enum xfs_refc_domain, i1_domain)
  2947. __field(xfs_agblock_t, i1_startblock)
  2948. __field(xfs_extlen_t, i1_blockcount)
  2949. __field(xfs_nlink_t, i1_refcount)
  2950. __field(enum xfs_refc_domain, i2_domain)
  2951. __field(xfs_agblock_t, i2_startblock)
  2952. __field(xfs_extlen_t, i2_blockcount)
  2953. __field(xfs_nlink_t, i2_refcount)
  2954. __field(enum xfs_refc_domain, i3_domain)
  2955. __field(xfs_agblock_t, i3_startblock)
  2956. __field(xfs_extlen_t, i3_blockcount)
  2957. __field(xfs_nlink_t, i3_refcount)
  2958. ),
  2959. TP_fast_assign(
  2960. __entry->dev = mp->m_super->s_dev;
  2961. __entry->agno = agno;
  2962. __entry->i1_domain = i1->rc_domain;
  2963. __entry->i1_startblock = i1->rc_startblock;
  2964. __entry->i1_blockcount = i1->rc_blockcount;
  2965. __entry->i1_refcount = i1->rc_refcount;
  2966. __entry->i2_domain = i2->rc_domain;
  2967. __entry->i2_startblock = i2->rc_startblock;
  2968. __entry->i2_blockcount = i2->rc_blockcount;
  2969. __entry->i2_refcount = i2->rc_refcount;
  2970. __entry->i3_domain = i3->rc_domain;
  2971. __entry->i3_startblock = i3->rc_startblock;
  2972. __entry->i3_blockcount = i3->rc_blockcount;
  2973. __entry->i3_refcount = i3->rc_refcount;
  2974. ),
  2975. TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
  2976. "dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
  2977. "dom %s agbno 0x%x fsbcount 0x%x refcount %u",
  2978. MAJOR(__entry->dev), MINOR(__entry->dev),
  2979. __entry->agno,
  2980. __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS),
  2981. __entry->i1_startblock,
  2982. __entry->i1_blockcount,
  2983. __entry->i1_refcount,
  2984. __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS),
  2985. __entry->i2_startblock,
  2986. __entry->i2_blockcount,
  2987. __entry->i2_refcount,
  2988. __print_symbolic(__entry->i3_domain, XFS_REFC_DOMAIN_STRINGS),
  2989. __entry->i3_startblock,
  2990. __entry->i3_blockcount,
  2991. __entry->i3_refcount)
  2992. );
  2993. #define DEFINE_REFCOUNT_TRIPLE_EXTENT_EVENT(name) \
  2994. DEFINE_EVENT(xfs_refcount_triple_extent_class, name, \
  2995. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  2996. struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2, \
  2997. struct xfs_refcount_irec *i3), \
  2998. TP_ARGS(mp, agno, i1, i2, i3))
  2999. /* refcount btree tracepoints */
  3000. DEFINE_BUSY_EVENT(xfs_refcountbt_alloc_block);
  3001. DEFINE_BUSY_EVENT(xfs_refcountbt_free_block);
  3002. DEFINE_AG_BTREE_LOOKUP_EVENT(xfs_refcount_lookup);
  3003. DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_get);
  3004. DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_update);
  3005. DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_insert);
  3006. DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_delete);
  3007. DEFINE_AG_ERROR_EVENT(xfs_refcount_insert_error);
  3008. DEFINE_AG_ERROR_EVENT(xfs_refcount_delete_error);
  3009. DEFINE_AG_ERROR_EVENT(xfs_refcount_update_error);
  3010. /* refcount adjustment tracepoints */
  3011. DEFINE_AG_EXTENT_EVENT(xfs_refcount_increase);
  3012. DEFINE_AG_EXTENT_EVENT(xfs_refcount_decrease);
  3013. DEFINE_AG_EXTENT_EVENT(xfs_refcount_cow_increase);
  3014. DEFINE_AG_EXTENT_EVENT(xfs_refcount_cow_decrease);
  3015. DEFINE_REFCOUNT_TRIPLE_EXTENT_EVENT(xfs_refcount_merge_center_extents);
  3016. DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_modify_extent);
  3017. DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_recover_extent);
  3018. DEFINE_REFCOUNT_EXTENT_AT_EVENT(xfs_refcount_split_extent);
  3019. DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(xfs_refcount_merge_left_extent);
  3020. DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(xfs_refcount_merge_right_extent);
  3021. DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(xfs_refcount_find_left_extent);
  3022. DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(xfs_refcount_find_right_extent);
  3023. DEFINE_AG_ERROR_EVENT(xfs_refcount_adjust_error);
  3024. DEFINE_AG_ERROR_EVENT(xfs_refcount_adjust_cow_error);
  3025. DEFINE_AG_ERROR_EVENT(xfs_refcount_merge_center_extents_error);
  3026. DEFINE_AG_ERROR_EVENT(xfs_refcount_modify_extent_error);
  3027. DEFINE_AG_ERROR_EVENT(xfs_refcount_split_extent_error);
  3028. DEFINE_AG_ERROR_EVENT(xfs_refcount_merge_left_extent_error);
  3029. DEFINE_AG_ERROR_EVENT(xfs_refcount_merge_right_extent_error);
  3030. DEFINE_AG_ERROR_EVENT(xfs_refcount_find_left_extent_error);
  3031. DEFINE_AG_ERROR_EVENT(xfs_refcount_find_right_extent_error);
  3032. /* reflink helpers */
  3033. DEFINE_AG_EXTENT_EVENT(xfs_refcount_find_shared);
  3034. DEFINE_AG_EXTENT_EVENT(xfs_refcount_find_shared_result);
  3035. DEFINE_AG_ERROR_EVENT(xfs_refcount_find_shared_error);
  3036. #define DEFINE_REFCOUNT_DEFERRED_EVENT DEFINE_PHYS_EXTENT_DEFERRED_EVENT
  3037. DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_defer);
  3038. DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_deferred);
  3039. TRACE_EVENT(xfs_refcount_finish_one_leftover,
  3040. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  3041. int type, xfs_agblock_t agbno, xfs_extlen_t len,
  3042. xfs_agblock_t new_agbno, xfs_extlen_t new_len),
  3043. TP_ARGS(mp, agno, type, agbno, len, new_agbno, new_len),
  3044. TP_STRUCT__entry(
  3045. __field(dev_t, dev)
  3046. __field(xfs_agnumber_t, agno)
  3047. __field(int, type)
  3048. __field(xfs_agblock_t, agbno)
  3049. __field(xfs_extlen_t, len)
  3050. __field(xfs_agblock_t, new_agbno)
  3051. __field(xfs_extlen_t, new_len)
  3052. ),
  3053. TP_fast_assign(
  3054. __entry->dev = mp->m_super->s_dev;
  3055. __entry->agno = agno;
  3056. __entry->type = type;
  3057. __entry->agbno = agbno;
  3058. __entry->len = len;
  3059. __entry->new_agbno = new_agbno;
  3060. __entry->new_len = new_len;
  3061. ),
  3062. TP_printk("dev %d:%d type %d agno 0x%x agbno 0x%x fsbcount 0x%x new_agbno 0x%x new_fsbcount 0x%x",
  3063. MAJOR(__entry->dev), MINOR(__entry->dev),
  3064. __entry->type,
  3065. __entry->agno,
  3066. __entry->agbno,
  3067. __entry->len,
  3068. __entry->new_agbno,
  3069. __entry->new_len)
  3070. );
  3071. /* simple inode-based error/%ip tracepoint class */
  3072. DECLARE_EVENT_CLASS(xfs_inode_error_class,
  3073. TP_PROTO(struct xfs_inode *ip, int error, unsigned long caller_ip),
  3074. TP_ARGS(ip, error, caller_ip),
  3075. TP_STRUCT__entry(
  3076. __field(dev_t, dev)
  3077. __field(xfs_ino_t, ino)
  3078. __field(int, error)
  3079. __field(unsigned long, caller_ip)
  3080. ),
  3081. TP_fast_assign(
  3082. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  3083. __entry->ino = ip->i_ino;
  3084. __entry->error = error;
  3085. __entry->caller_ip = caller_ip;
  3086. ),
  3087. TP_printk("dev %d:%d ino 0x%llx error %d caller %pS",
  3088. MAJOR(__entry->dev), MINOR(__entry->dev),
  3089. __entry->ino,
  3090. __entry->error,
  3091. (char *)__entry->caller_ip)
  3092. );
  3093. #define DEFINE_INODE_ERROR_EVENT(name) \
  3094. DEFINE_EVENT(xfs_inode_error_class, name, \
  3095. TP_PROTO(struct xfs_inode *ip, int error, \
  3096. unsigned long caller_ip), \
  3097. TP_ARGS(ip, error, caller_ip))
  3098. /* reflink tracepoint classes */
  3099. /* two-file io tracepoint class */
  3100. DECLARE_EVENT_CLASS(xfs_double_io_class,
  3101. TP_PROTO(struct xfs_inode *src, xfs_off_t soffset, xfs_off_t len,
  3102. struct xfs_inode *dest, xfs_off_t doffset),
  3103. TP_ARGS(src, soffset, len, dest, doffset),
  3104. TP_STRUCT__entry(
  3105. __field(dev_t, dev)
  3106. __field(xfs_ino_t, src_ino)
  3107. __field(loff_t, src_isize)
  3108. __field(loff_t, src_disize)
  3109. __field(loff_t, src_offset)
  3110. __field(long long, len)
  3111. __field(xfs_ino_t, dest_ino)
  3112. __field(loff_t, dest_isize)
  3113. __field(loff_t, dest_disize)
  3114. __field(loff_t, dest_offset)
  3115. ),
  3116. TP_fast_assign(
  3117. __entry->dev = VFS_I(src)->i_sb->s_dev;
  3118. __entry->src_ino = src->i_ino;
  3119. __entry->src_isize = VFS_I(src)->i_size;
  3120. __entry->src_disize = src->i_disk_size;
  3121. __entry->src_offset = soffset;
  3122. __entry->len = len;
  3123. __entry->dest_ino = dest->i_ino;
  3124. __entry->dest_isize = VFS_I(dest)->i_size;
  3125. __entry->dest_disize = dest->i_disk_size;
  3126. __entry->dest_offset = doffset;
  3127. ),
  3128. TP_printk("dev %d:%d bytecount 0x%llx "
  3129. "ino 0x%llx isize 0x%llx disize 0x%llx pos 0x%llx -> "
  3130. "ino 0x%llx isize 0x%llx disize 0x%llx pos 0x%llx",
  3131. MAJOR(__entry->dev), MINOR(__entry->dev),
  3132. __entry->len,
  3133. __entry->src_ino,
  3134. __entry->src_isize,
  3135. __entry->src_disize,
  3136. __entry->src_offset,
  3137. __entry->dest_ino,
  3138. __entry->dest_isize,
  3139. __entry->dest_disize,
  3140. __entry->dest_offset)
  3141. )
  3142. #define DEFINE_DOUBLE_IO_EVENT(name) \
  3143. DEFINE_EVENT(xfs_double_io_class, name, \
  3144. TP_PROTO(struct xfs_inode *src, xfs_off_t soffset, xfs_off_t len, \
  3145. struct xfs_inode *dest, xfs_off_t doffset), \
  3146. TP_ARGS(src, soffset, len, dest, doffset))
  3147. /* inode/irec events */
  3148. DECLARE_EVENT_CLASS(xfs_inode_irec_class,
  3149. TP_PROTO(struct xfs_inode *ip, struct xfs_bmbt_irec *irec),
  3150. TP_ARGS(ip, irec),
  3151. TP_STRUCT__entry(
  3152. __field(dev_t, dev)
  3153. __field(xfs_ino_t, ino)
  3154. __field(xfs_fileoff_t, lblk)
  3155. __field(xfs_extlen_t, len)
  3156. __field(xfs_fsblock_t, pblk)
  3157. __field(int, state)
  3158. ),
  3159. TP_fast_assign(
  3160. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  3161. __entry->ino = ip->i_ino;
  3162. __entry->lblk = irec->br_startoff;
  3163. __entry->len = irec->br_blockcount;
  3164. __entry->pblk = irec->br_startblock;
  3165. __entry->state = irec->br_state;
  3166. ),
  3167. TP_printk("dev %d:%d ino 0x%llx fileoff 0x%llx fsbcount 0x%x startblock 0x%llx st %d",
  3168. MAJOR(__entry->dev), MINOR(__entry->dev),
  3169. __entry->ino,
  3170. __entry->lblk,
  3171. __entry->len,
  3172. __entry->pblk,
  3173. __entry->state)
  3174. );
  3175. #define DEFINE_INODE_IREC_EVENT(name) \
  3176. DEFINE_EVENT(xfs_inode_irec_class, name, \
  3177. TP_PROTO(struct xfs_inode *ip, struct xfs_bmbt_irec *irec), \
  3178. TP_ARGS(ip, irec))
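/*
 * Illustrative sketch, not part of the upstream header: the inode/irec
 * events record the br_startoff/br_blockcount/br_startblock/br_state fields
 * of whatever bmbt mapping the caller is working on, e.g. in the
 * copy-on-write paths defined below:
 *
 *	trace_xfs_reflink_cow_found(ip, &imap);
 *
 * ("imap" stands in for a struct xfs_bmbt_irec the caller already holds.)
 */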
  3179. /* refcount/reflink tracepoint definitions */
  3180. /* reflink tracepoints */
  3181. DEFINE_INODE_EVENT(xfs_reflink_set_inode_flag);
  3182. DEFINE_INODE_EVENT(xfs_reflink_unset_inode_flag);
  3183. DEFINE_ITRUNC_EVENT(xfs_reflink_update_inode_size);
  3184. TRACE_EVENT(xfs_reflink_remap_blocks,
  3185. TP_PROTO(struct xfs_inode *src, xfs_fileoff_t soffset,
  3186. xfs_filblks_t len, struct xfs_inode *dest,
  3187. xfs_fileoff_t doffset),
  3188. TP_ARGS(src, soffset, len, dest, doffset),
  3189. TP_STRUCT__entry(
  3190. __field(dev_t, dev)
  3191. __field(xfs_ino_t, src_ino)
  3192. __field(xfs_fileoff_t, src_lblk)
  3193. __field(xfs_filblks_t, len)
  3194. __field(xfs_ino_t, dest_ino)
  3195. __field(xfs_fileoff_t, dest_lblk)
  3196. ),
  3197. TP_fast_assign(
  3198. __entry->dev = VFS_I(src)->i_sb->s_dev;
  3199. __entry->src_ino = src->i_ino;
  3200. __entry->src_lblk = soffset;
  3201. __entry->len = len;
  3202. __entry->dest_ino = dest->i_ino;
  3203. __entry->dest_lblk = doffset;
  3204. ),
  3205. TP_printk("dev %d:%d fsbcount 0x%llx "
  3206. "ino 0x%llx fileoff 0x%llx -> ino 0x%llx fileoff 0x%llx",
  3207. MAJOR(__entry->dev), MINOR(__entry->dev),
  3208. __entry->len,
  3209. __entry->src_ino,
  3210. __entry->src_lblk,
  3211. __entry->dest_ino,
  3212. __entry->dest_lblk)
  3213. );
  3214. DEFINE_DOUBLE_IO_EVENT(xfs_reflink_remap_range);
  3215. DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_range_error);
  3216. DEFINE_INODE_ERROR_EVENT(xfs_reflink_set_inode_flag_error);
  3217. DEFINE_INODE_ERROR_EVENT(xfs_reflink_update_inode_size_error);
  3218. DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_blocks_error);
  3219. DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_extent_error);
  3220. DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_src);
  3221. DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_dest);
  3222. /* dedupe tracepoints */
  3223. DEFINE_DOUBLE_IO_EVENT(xfs_reflink_compare_extents);
  3224. DEFINE_INODE_ERROR_EVENT(xfs_reflink_compare_extents_error);
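/*
 * Illustrative sketch, not part of the upstream header: the inode error
 * events are meant for failure paths, with _RET_IP_ passed as caller_ip so
 * that the "%pS" format above prints the function that hit the error:
 *
 *	if (error)
 *		trace_xfs_reflink_remap_range_error(dest, error, _RET_IP_);
 */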
  3225. /* ioctl tracepoints */
  3226. TRACE_EVENT(xfs_ioctl_clone,
  3227. TP_PROTO(struct inode *src, struct inode *dest),
  3228. TP_ARGS(src, dest),
  3229. TP_STRUCT__entry(
  3230. __field(dev_t, dev)
  3231. __field(unsigned long, src_ino)
  3232. __field(loff_t, src_isize)
  3233. __field(unsigned long, dest_ino)
  3234. __field(loff_t, dest_isize)
  3235. ),
  3236. TP_fast_assign(
  3237. __entry->dev = src->i_sb->s_dev;
  3238. __entry->src_ino = src->i_ino;
  3239. __entry->src_isize = i_size_read(src);
  3240. __entry->dest_ino = dest->i_ino;
  3241. __entry->dest_isize = i_size_read(dest);
  3242. ),
  3243. TP_printk("dev %d:%d ino 0x%lx isize 0x%llx -> ino 0x%lx isize 0x%llx",
  3244. MAJOR(__entry->dev), MINOR(__entry->dev),
  3245. __entry->src_ino,
  3246. __entry->src_isize,
  3247. __entry->dest_ino,
  3248. __entry->dest_isize)
  3249. );
  3250. /* unshare tracepoints */
  3251. DEFINE_SIMPLE_IO_EVENT(xfs_reflink_unshare);
  3252. DEFINE_INODE_ERROR_EVENT(xfs_reflink_unshare_error);
  3253. /* copy on write */
  3254. DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_around_shared);
  3255. DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
  3256. DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
  3257. DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow);
  3258. DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
  3259. DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
  3260. DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_from);
  3261. DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_to);
  3262. DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error);
  3263. DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);
  3264. DEFINE_INODE_IREC_EVENT(xfs_reflink_cancel_cow);
  3265. /* rmap swapext tracepoints */
  3266. DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap);
  3267. DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap_piece);
  3268. DEFINE_INODE_ERROR_EVENT(xfs_swap_extent_rmap_error);
  3269. /* fsmap traces */
  3270. DECLARE_EVENT_CLASS(xfs_fsmap_class,
  3271. TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno,
  3272. const struct xfs_rmap_irec *rmap),
  3273. TP_ARGS(mp, keydev, agno, rmap),
  3274. TP_STRUCT__entry(
  3275. __field(dev_t, dev)
  3276. __field(dev_t, keydev)
  3277. __field(xfs_agnumber_t, agno)
  3278. __field(xfs_fsblock_t, bno)
  3279. __field(xfs_filblks_t, len)
  3280. __field(uint64_t, owner)
  3281. __field(uint64_t, offset)
  3282. __field(unsigned int, flags)
  3283. ),
  3284. TP_fast_assign(
  3285. __entry->dev = mp->m_super->s_dev;
  3286. __entry->keydev = new_decode_dev(keydev);
  3287. __entry->agno = agno;
  3288. __entry->bno = rmap->rm_startblock;
  3289. __entry->len = rmap->rm_blockcount;
  3290. __entry->owner = rmap->rm_owner;
  3291. __entry->offset = rmap->rm_offset;
  3292. __entry->flags = rmap->rm_flags;
  3293. ),
  3294. TP_printk("dev %d:%d keydev %d:%d agno 0x%x startblock 0x%llx fsbcount 0x%llx owner 0x%llx fileoff 0x%llx flags 0x%x",
  3295. MAJOR(__entry->dev), MINOR(__entry->dev),
  3296. MAJOR(__entry->keydev), MINOR(__entry->keydev),
  3297. __entry->agno,
  3298. __entry->bno,
  3299. __entry->len,
  3300. __entry->owner,
  3301. __entry->offset,
  3302. __entry->flags)
  3303. )
  3304. #define DEFINE_FSMAP_EVENT(name) \
  3305. DEFINE_EVENT(xfs_fsmap_class, name, \
  3306. TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno, \
  3307. const struct xfs_rmap_irec *rmap), \
  3308. TP_ARGS(mp, keydev, agno, rmap))
  3309. DEFINE_FSMAP_EVENT(xfs_fsmap_low_key);
  3310. DEFINE_FSMAP_EVENT(xfs_fsmap_high_key);
  3311. DEFINE_FSMAP_EVENT(xfs_fsmap_mapping);
  3312. DECLARE_EVENT_CLASS(xfs_getfsmap_class,
  3313. TP_PROTO(struct xfs_mount *mp, struct xfs_fsmap *fsmap),
  3314. TP_ARGS(mp, fsmap),
  3315. TP_STRUCT__entry(
  3316. __field(dev_t, dev)
  3317. __field(dev_t, keydev)
  3318. __field(xfs_daddr_t, block)
  3319. __field(xfs_daddr_t, len)
  3320. __field(uint64_t, owner)
  3321. __field(uint64_t, offset)
  3322. __field(uint64_t, flags)
  3323. ),
  3324. TP_fast_assign(
  3325. __entry->dev = mp->m_super->s_dev;
  3326. __entry->keydev = new_decode_dev(fsmap->fmr_device);
  3327. __entry->block = fsmap->fmr_physical;
  3328. __entry->len = fsmap->fmr_length;
  3329. __entry->owner = fsmap->fmr_owner;
  3330. __entry->offset = fsmap->fmr_offset;
  3331. __entry->flags = fsmap->fmr_flags;
  3332. ),
  3333. TP_printk("dev %d:%d keydev %d:%d daddr 0x%llx bbcount 0x%llx owner 0x%llx fileoff_daddr 0x%llx flags 0x%llx",
  3334. MAJOR(__entry->dev), MINOR(__entry->dev),
  3335. MAJOR(__entry->keydev), MINOR(__entry->keydev),
  3336. __entry->block,
  3337. __entry->len,
  3338. __entry->owner,
  3339. __entry->offset,
  3340. __entry->flags)
  3341. )
  3342. #define DEFINE_GETFSMAP_EVENT(name) \
  3343. DEFINE_EVENT(xfs_getfsmap_class, name, \
  3344. TP_PROTO(struct xfs_mount *mp, struct xfs_fsmap *fsmap), \
  3345. TP_ARGS(mp, fsmap))
  3346. DEFINE_GETFSMAP_EVENT(xfs_getfsmap_low_key);
  3347. DEFINE_GETFSMAP_EVENT(xfs_getfsmap_high_key);
  3348. DEFINE_GETFSMAP_EVENT(xfs_getfsmap_mapping);
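/*
 * Illustrative sketch, not part of the upstream header: the getfsmap events
 * log the fsmap record in the daddr/bbcount units shown in the format
 * string, so a hypothetical caller would trace its key or result record
 * directly:
 *
 *	struct xfs_fsmap	fmr = {
 *		.fmr_device	= keydev,
 *		.fmr_physical	= daddr,
 *		.fmr_length	= bbcount,
 *	};
 *
 *	trace_xfs_getfsmap_mapping(mp, &fmr);
 *
 * ("keydev", "daddr" and "bbcount" are stand-ins for the caller's state.)
 */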
  3349. DECLARE_EVENT_CLASS(xfs_trans_resv_class,
  3350. TP_PROTO(struct xfs_mount *mp, unsigned int type,
  3351. struct xfs_trans_res *res),
  3352. TP_ARGS(mp, type, res),
  3353. TP_STRUCT__entry(
  3354. __field(dev_t, dev)
  3355. __field(int, type)
  3356. __field(uint, logres)
  3357. __field(int, logcount)
  3358. __field(int, logflags)
  3359. ),
  3360. TP_fast_assign(
  3361. __entry->dev = mp->m_super->s_dev;
  3362. __entry->type = type;
  3363. __entry->logres = res->tr_logres;
  3364. __entry->logcount = res->tr_logcount;
  3365. __entry->logflags = res->tr_logflags;
  3366. ),
  3367. TP_printk("dev %d:%d type %d logres %u logcount %d flags 0x%x",
  3368. MAJOR(__entry->dev), MINOR(__entry->dev),
  3369. __entry->type,
  3370. __entry->logres,
  3371. __entry->logcount,
  3372. __entry->logflags)
  3373. )
  3374. #define DEFINE_TRANS_RESV_EVENT(name) \
  3375. DEFINE_EVENT(xfs_trans_resv_class, name, \
  3376. TP_PROTO(struct xfs_mount *mp, unsigned int type, \
  3377. struct xfs_trans_res *res), \
  3378. TP_ARGS(mp, type, res))
  3379. DEFINE_TRANS_RESV_EVENT(xfs_trans_resv_calc);
  3380. DEFINE_TRANS_RESV_EVENT(xfs_trans_resv_calc_minlogsize);
  3381. TRACE_EVENT(xfs_log_get_max_trans_res,
  3382. TP_PROTO(struct xfs_mount *mp, const struct xfs_trans_res *res),
  3383. TP_ARGS(mp, res),
  3384. TP_STRUCT__entry(
  3385. __field(dev_t, dev)
  3386. __field(uint, logres)
  3387. __field(int, logcount)
  3388. ),
  3389. TP_fast_assign(
  3390. __entry->dev = mp->m_super->s_dev;
  3391. __entry->logres = res->tr_logres;
  3392. __entry->logcount = res->tr_logcount;
  3393. ),
  3394. TP_printk("dev %d:%d logres %u logcount %d",
  3395. MAJOR(__entry->dev), MINOR(__entry->dev),
  3396. __entry->logres,
  3397. __entry->logcount)
  3398. );
  3399. DECLARE_EVENT_CLASS(xfs_trans_class,
  3400. TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip),
  3401. TP_ARGS(tp, caller_ip),
  3402. TP_STRUCT__entry(
  3403. __field(dev_t, dev)
  3404. __field(uint32_t, tid)
  3405. __field(uint32_t, flags)
  3406. __field(unsigned long, caller_ip)
  3407. ),
  3408. TP_fast_assign(
  3409. __entry->dev = tp->t_mountp->m_super->s_dev;
  3410. __entry->tid = 0;
  3411. if (tp->t_ticket)
  3412. __entry->tid = tp->t_ticket->t_tid;
  3413. __entry->flags = tp->t_flags;
  3414. __entry->caller_ip = caller_ip;
  3415. ),
  3416. TP_printk("dev %d:%d trans %x flags 0x%x caller %pS",
  3417. MAJOR(__entry->dev), MINOR(__entry->dev),
  3418. __entry->tid,
  3419. __entry->flags,
  3420. (char *)__entry->caller_ip)
  3421. )
  3422. #define DEFINE_TRANS_EVENT(name) \
  3423. DEFINE_EVENT(xfs_trans_class, name, \
  3424. TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), \
  3425. TP_ARGS(tp, caller_ip))
  3426. DEFINE_TRANS_EVENT(xfs_trans_alloc);
  3427. DEFINE_TRANS_EVENT(xfs_trans_cancel);
  3428. DEFINE_TRANS_EVENT(xfs_trans_commit);
  3429. DEFINE_TRANS_EVENT(xfs_trans_dup);
  3430. DEFINE_TRANS_EVENT(xfs_trans_free);
  3431. DEFINE_TRANS_EVENT(xfs_trans_roll);
  3432. DEFINE_TRANS_EVENT(xfs_trans_add_item);
  3433. DEFINE_TRANS_EVENT(xfs_trans_commit_items);
  3434. DEFINE_TRANS_EVENT(xfs_trans_free_items);
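/*
 * Illustrative sketch, not part of the upstream header: the transaction
 * events are emitted from the xfs_trans_* entry points with _RET_IP_ as
 * caller_ip, so the "caller %pS" field above identifies who allocated,
 * committed or cancelled the transaction:
 *
 *	trace_xfs_trans_alloc(tp, _RET_IP_);
 */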
  3435. TRACE_EVENT(xfs_iunlink_update_bucket,
  3436. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int bucket,
  3437. xfs_agino_t old_ptr, xfs_agino_t new_ptr),
  3438. TP_ARGS(mp, agno, bucket, old_ptr, new_ptr),
  3439. TP_STRUCT__entry(
  3440. __field(dev_t, dev)
  3441. __field(xfs_agnumber_t, agno)
  3442. __field(unsigned int, bucket)
  3443. __field(xfs_agino_t, old_ptr)
  3444. __field(xfs_agino_t, new_ptr)
  3445. ),
  3446. TP_fast_assign(
  3447. __entry->dev = mp->m_super->s_dev;
  3448. __entry->agno = agno;
  3449. __entry->bucket = bucket;
  3450. __entry->old_ptr = old_ptr;
  3451. __entry->new_ptr = new_ptr;
  3452. ),
  3453. TP_printk("dev %d:%d agno 0x%x bucket %u old 0x%x new 0x%x",
  3454. MAJOR(__entry->dev), MINOR(__entry->dev),
  3455. __entry->agno,
  3456. __entry->bucket,
  3457. __entry->old_ptr,
  3458. __entry->new_ptr)
  3459. );
  3460. TRACE_EVENT(xfs_iunlink_update_dinode,
  3461. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino,
  3462. xfs_agino_t old_ptr, xfs_agino_t new_ptr),
  3463. TP_ARGS(mp, agno, agino, old_ptr, new_ptr),
  3464. TP_STRUCT__entry(
  3465. __field(dev_t, dev)
  3466. __field(xfs_agnumber_t, agno)
  3467. __field(xfs_agino_t, agino)
  3468. __field(xfs_agino_t, old_ptr)
  3469. __field(xfs_agino_t, new_ptr)
  3470. ),
  3471. TP_fast_assign(
  3472. __entry->dev = mp->m_super->s_dev;
  3473. __entry->agno = agno;
  3474. __entry->agino = agino;
  3475. __entry->old_ptr = old_ptr;
  3476. __entry->new_ptr = new_ptr;
  3477. ),
  3478. TP_printk("dev %d:%d agno 0x%x agino 0x%x old 0x%x new 0x%x",
  3479. MAJOR(__entry->dev), MINOR(__entry->dev),
  3480. __entry->agno,
  3481. __entry->agino,
  3482. __entry->old_ptr,
  3483. __entry->new_ptr)
  3484. );
  3485. DECLARE_EVENT_CLASS(xfs_ag_inode_class,
  3486. TP_PROTO(struct xfs_inode *ip),
  3487. TP_ARGS(ip),
  3488. TP_STRUCT__entry(
  3489. __field(dev_t, dev)
  3490. __field(xfs_agnumber_t, agno)
  3491. __field(xfs_agino_t, agino)
  3492. ),
  3493. TP_fast_assign(
  3494. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  3495. __entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
  3496. __entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
  3497. ),
  3498. TP_printk("dev %d:%d agno 0x%x agino 0x%x",
  3499. MAJOR(__entry->dev), MINOR(__entry->dev),
  3500. __entry->agno, __entry->agino)
  3501. )
  3502. #define DEFINE_AGINODE_EVENT(name) \
  3503. DEFINE_EVENT(xfs_ag_inode_class, name, \
  3504. TP_PROTO(struct xfs_inode *ip), \
  3505. TP_ARGS(ip))
  3506. DEFINE_AGINODE_EVENT(xfs_iunlink);
  3507. DEFINE_AGINODE_EVENT(xfs_iunlink_remove);
  3508. DECLARE_EVENT_CLASS(xfs_fs_corrupt_class,
  3509. TP_PROTO(struct xfs_mount *mp, unsigned int flags),
  3510. TP_ARGS(mp, flags),
  3511. TP_STRUCT__entry(
  3512. __field(dev_t, dev)
  3513. __field(unsigned int, flags)
  3514. ),
  3515. TP_fast_assign(
  3516. __entry->dev = mp->m_super->s_dev;
  3517. __entry->flags = flags;
  3518. ),
  3519. TP_printk("dev %d:%d flags 0x%x",
  3520. MAJOR(__entry->dev), MINOR(__entry->dev),
  3521. __entry->flags)
  3522. );
  3523. #define DEFINE_FS_CORRUPT_EVENT(name) \
  3524. DEFINE_EVENT(xfs_fs_corrupt_class, name, \
  3525. TP_PROTO(struct xfs_mount *mp, unsigned int flags), \
  3526. TP_ARGS(mp, flags))
  3527. DEFINE_FS_CORRUPT_EVENT(xfs_fs_mark_sick);
  3528. DEFINE_FS_CORRUPT_EVENT(xfs_fs_mark_healthy);
  3529. DEFINE_FS_CORRUPT_EVENT(xfs_fs_unfixed_corruption);
  3530. DEFINE_FS_CORRUPT_EVENT(xfs_rt_mark_sick);
  3531. DEFINE_FS_CORRUPT_EVENT(xfs_rt_mark_healthy);
  3532. DEFINE_FS_CORRUPT_EVENT(xfs_rt_unfixed_corruption);
  3533. DECLARE_EVENT_CLASS(xfs_ag_corrupt_class,
  3534. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int flags),
  3535. TP_ARGS(mp, agno, flags),
  3536. TP_STRUCT__entry(
  3537. __field(dev_t, dev)
  3538. __field(xfs_agnumber_t, agno)
  3539. __field(unsigned int, flags)
  3540. ),
  3541. TP_fast_assign(
  3542. __entry->dev = mp->m_super->s_dev;
  3543. __entry->agno = agno;
  3544. __entry->flags = flags;
  3545. ),
  3546. TP_printk("dev %d:%d agno 0x%x flags 0x%x",
  3547. MAJOR(__entry->dev), MINOR(__entry->dev),
  3548. __entry->agno, __entry->flags)
  3549. );
  3550. #define DEFINE_AG_CORRUPT_EVENT(name) \
  3551. DEFINE_EVENT(xfs_ag_corrupt_class, name, \
  3552. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  3553. unsigned int flags), \
  3554. TP_ARGS(mp, agno, flags))
  3555. DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_sick);
  3556. DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_healthy);
  3557. DEFINE_AG_CORRUPT_EVENT(xfs_ag_unfixed_corruption);
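/*
 * Illustrative sketch, not part of the upstream header: the corruption and
 * health events only record a bitmask, so a hypothetical caller in the
 * health tracking code would pass whichever sickness mask it is setting or
 * clearing:
 *
 *	trace_xfs_ag_mark_sick(mp, agno, mask);
 *
 * ("mask" stands in for the XFS_SICK_* flags declared in fs/xfs/xfs_health.h.)
 */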
  3558. DECLARE_EVENT_CLASS(xfs_inode_corrupt_class,
  3559. TP_PROTO(struct xfs_inode *ip, unsigned int flags),
  3560. TP_ARGS(ip, flags),
  3561. TP_STRUCT__entry(
  3562. __field(dev_t, dev)
  3563. __field(xfs_ino_t, ino)
  3564. __field(unsigned int, flags)
  3565. ),
  3566. TP_fast_assign(
  3567. __entry->dev = ip->i_mount->m_super->s_dev;
  3568. __entry->ino = ip->i_ino;
  3569. __entry->flags = flags;
  3570. ),
  3571. TP_printk("dev %d:%d ino 0x%llx flags 0x%x",
  3572. MAJOR(__entry->dev), MINOR(__entry->dev),
  3573. __entry->ino, __entry->flags)
  3574. );
  3575. #define DEFINE_INODE_CORRUPT_EVENT(name) \
  3576. DEFINE_EVENT(xfs_inode_corrupt_class, name, \
  3577. TP_PROTO(struct xfs_inode *ip, unsigned int flags), \
  3578. TP_ARGS(ip, flags))
  3579. DEFINE_INODE_CORRUPT_EVENT(xfs_inode_mark_sick);
  3580. DEFINE_INODE_CORRUPT_EVENT(xfs_inode_mark_healthy);
  3581. TRACE_EVENT(xfs_iwalk_ag,
  3582. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  3583. xfs_agino_t startino),
  3584. TP_ARGS(mp, agno, startino),
  3585. TP_STRUCT__entry(
  3586. __field(dev_t, dev)
  3587. __field(xfs_agnumber_t, agno)
  3588. __field(xfs_agino_t, startino)
  3589. ),
  3590. TP_fast_assign(
  3591. __entry->dev = mp->m_super->s_dev;
  3592. __entry->agno = agno;
  3593. __entry->startino = startino;
  3594. ),
  3595. TP_printk("dev %d:%d agno 0x%x startino 0x%x",
  3596. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno,
  3597. __entry->startino)
  3598. )
  3599. TRACE_EVENT(xfs_iwalk_ag_rec,
  3600. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  3601. struct xfs_inobt_rec_incore *irec),
  3602. TP_ARGS(mp, agno, irec),
  3603. TP_STRUCT__entry(
  3604. __field(dev_t, dev)
  3605. __field(xfs_agnumber_t, agno)
  3606. __field(xfs_agino_t, startino)
  3607. __field(uint64_t, freemask)
  3608. ),
  3609. TP_fast_assign(
  3610. __entry->dev = mp->m_super->s_dev;
  3611. __entry->agno = agno;
  3612. __entry->startino = irec->ir_startino;
  3613. __entry->freemask = irec->ir_free;
  3614. ),
  3615. TP_printk("dev %d:%d agno 0x%x startino 0x%x freemask 0x%llx",
  3616. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno,
  3617. __entry->startino, __entry->freemask)
  3618. )
  3619. TRACE_EVENT(xfs_pwork_init,
  3620. TP_PROTO(struct xfs_mount *mp, unsigned int nr_threads, pid_t pid),
  3621. TP_ARGS(mp, nr_threads, pid),
  3622. TP_STRUCT__entry(
  3623. __field(dev_t, dev)
  3624. __field(unsigned int, nr_threads)
  3625. __field(pid_t, pid)
  3626. ),
  3627. TP_fast_assign(
  3628. __entry->dev = mp->m_super->s_dev;
  3629. __entry->nr_threads = nr_threads;
  3630. __entry->pid = pid;
  3631. ),
  3632. TP_printk("dev %d:%d nr_threads %u pid %u",
  3633. MAJOR(__entry->dev), MINOR(__entry->dev),
  3634. __entry->nr_threads, __entry->pid)
  3635. )
  3636. DECLARE_EVENT_CLASS(xfs_kmem_class,
  3637. TP_PROTO(ssize_t size, int flags, unsigned long caller_ip),
  3638. TP_ARGS(size, flags, caller_ip),
  3639. TP_STRUCT__entry(
  3640. __field(ssize_t, size)
  3641. __field(int, flags)
  3642. __field(unsigned long, caller_ip)
  3643. ),
  3644. TP_fast_assign(
  3645. __entry->size = size;
  3646. __entry->flags = flags;
  3647. __entry->caller_ip = caller_ip;
  3648. ),
  3649. TP_printk("size %zd flags 0x%x caller %pS",
  3650. __entry->size,
  3651. __entry->flags,
  3652. (char *)__entry->caller_ip)
  3653. )
  3654. #define DEFINE_KMEM_EVENT(name) \
  3655. DEFINE_EVENT(xfs_kmem_class, name, \
  3656. TP_PROTO(ssize_t size, int flags, unsigned long caller_ip), \
  3657. TP_ARGS(size, flags, caller_ip))
  3658. DEFINE_KMEM_EVENT(kmem_alloc);
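/*
 * Illustrative sketch, not part of the upstream header: kmem_alloc is the
 * lone user of the kmem class; the XFS allocation wrapper would emit it
 * before calling into the allocator, roughly:
 *
 *	trace_kmem_alloc(size, flags, _RET_IP_);
 */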
  3659. TRACE_EVENT(xfs_check_new_dalign,
  3660. TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino),
  3661. TP_ARGS(mp, new_dalign, calc_rootino),
  3662. TP_STRUCT__entry(
  3663. __field(dev_t, dev)
  3664. __field(int, new_dalign)
  3665. __field(xfs_ino_t, sb_rootino)
  3666. __field(xfs_ino_t, calc_rootino)
  3667. ),
  3668. TP_fast_assign(
  3669. __entry->dev = mp->m_super->s_dev;
  3670. __entry->new_dalign = new_dalign;
  3671. __entry->sb_rootino = mp->m_sb.sb_rootino;
  3672. __entry->calc_rootino = calc_rootino;
  3673. ),
  3674. TP_printk("dev %d:%d new_dalign %d sb_rootino 0x%llx calc_rootino 0x%llx",
  3675. MAJOR(__entry->dev), MINOR(__entry->dev),
  3676. __entry->new_dalign, __entry->sb_rootino,
  3677. __entry->calc_rootino)
  3678. )
  3679. TRACE_EVENT(xfs_btree_commit_afakeroot,
  3680. TP_PROTO(struct xfs_btree_cur *cur),
  3681. TP_ARGS(cur),
  3682. TP_STRUCT__entry(
  3683. __field(dev_t, dev)
  3684. __field(xfs_btnum_t, btnum)
  3685. __field(xfs_agnumber_t, agno)
  3686. __field(xfs_agblock_t, agbno)
  3687. __field(unsigned int, levels)
  3688. __field(unsigned int, blocks)
  3689. ),
  3690. TP_fast_assign(
  3691. __entry->dev = cur->bc_mp->m_super->s_dev;
  3692. __entry->btnum = cur->bc_btnum;
  3693. __entry->agno = cur->bc_ag.pag->pag_agno;
  3694. __entry->agbno = cur->bc_ag.afake->af_root;
  3695. __entry->levels = cur->bc_ag.afake->af_levels;
  3696. __entry->blocks = cur->bc_ag.afake->af_blocks;
  3697. ),
  3698. TP_printk("dev %d:%d btree %s agno 0x%x levels %u blocks %u root %u",
  3699. MAJOR(__entry->dev), MINOR(__entry->dev),
  3700. __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
  3701. __entry->agno,
  3702. __entry->levels,
  3703. __entry->blocks,
  3704. __entry->agbno)
  3705. )
  3706. TRACE_EVENT(xfs_btree_commit_ifakeroot,
  3707. TP_PROTO(struct xfs_btree_cur *cur),
  3708. TP_ARGS(cur),
  3709. TP_STRUCT__entry(
  3710. __field(dev_t, dev)
  3711. __field(xfs_btnum_t, btnum)
  3712. __field(xfs_agnumber_t, agno)
  3713. __field(xfs_agino_t, agino)
  3714. __field(unsigned int, levels)
  3715. __field(unsigned int, blocks)
  3716. __field(int, whichfork)
  3717. ),
  3718. TP_fast_assign(
  3719. __entry->dev = cur->bc_mp->m_super->s_dev;
  3720. __entry->btnum = cur->bc_btnum;
  3721. __entry->agno = XFS_INO_TO_AGNO(cur->bc_mp,
  3722. cur->bc_ino.ip->i_ino);
  3723. __entry->agino = XFS_INO_TO_AGINO(cur->bc_mp,
  3724. cur->bc_ino.ip->i_ino);
  3725. __entry->levels = cur->bc_ino.ifake->if_levels;
  3726. __entry->blocks = cur->bc_ino.ifake->if_blocks;
  3727. __entry->whichfork = cur->bc_ino.whichfork;
  3728. ),
  3729. TP_printk("dev %d:%d btree %s agno 0x%x agino 0x%x whichfork %s levels %u blocks %u",
  3730. MAJOR(__entry->dev), MINOR(__entry->dev),
  3731. __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
  3732. __entry->agno,
  3733. __entry->agino,
  3734. __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
  3735. __entry->levels,
  3736. __entry->blocks)
  3737. )
  3738. TRACE_EVENT(xfs_btree_bload_level_geometry,
  3739. TP_PROTO(struct xfs_btree_cur *cur, unsigned int level,
  3740. uint64_t nr_this_level, unsigned int nr_per_block,
  3741. unsigned int desired_npb, uint64_t blocks,
  3742. uint64_t blocks_with_extra),
  3743. TP_ARGS(cur, level, nr_this_level, nr_per_block, desired_npb, blocks,
  3744. blocks_with_extra),
  3745. TP_STRUCT__entry(
  3746. __field(dev_t, dev)
  3747. __field(xfs_btnum_t, btnum)
  3748. __field(unsigned int, level)
  3749. __field(unsigned int, nlevels)
  3750. __field(uint64_t, nr_this_level)
  3751. __field(unsigned int, nr_per_block)
  3752. __field(unsigned int, desired_npb)
  3753. __field(unsigned long long, blocks)
  3754. __field(unsigned long long, blocks_with_extra)
  3755. ),
  3756. TP_fast_assign(
  3757. __entry->dev = cur->bc_mp->m_super->s_dev;
  3758. __entry->btnum = cur->bc_btnum;
  3759. __entry->level = level;
  3760. __entry->nlevels = cur->bc_nlevels;
  3761. __entry->nr_this_level = nr_this_level;
  3762. __entry->nr_per_block = nr_per_block;
  3763. __entry->desired_npb = desired_npb;
  3764. __entry->blocks = blocks;
  3765. __entry->blocks_with_extra = blocks_with_extra;
  3766. ),
  3767. TP_printk("dev %d:%d btree %s level %u/%u nr_this_level %llu nr_per_block %u desired_npb %u blocks %llu blocks_with_extra %llu",
  3768. MAJOR(__entry->dev), MINOR(__entry->dev),
  3769. __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
  3770. __entry->level,
  3771. __entry->nlevels,
  3772. __entry->nr_this_level,
  3773. __entry->nr_per_block,
  3774. __entry->desired_npb,
  3775. __entry->blocks,
  3776. __entry->blocks_with_extra)
  3777. )
  3778. TRACE_EVENT(xfs_btree_bload_block,
  3779. TP_PROTO(struct xfs_btree_cur *cur, unsigned int level,
  3780. uint64_t block_idx, uint64_t nr_blocks,
  3781. union xfs_btree_ptr *ptr, unsigned int nr_records),
  3782. TP_ARGS(cur, level, block_idx, nr_blocks, ptr, nr_records),
  3783. TP_STRUCT__entry(
  3784. __field(dev_t, dev)
  3785. __field(xfs_btnum_t, btnum)
  3786. __field(unsigned int, level)
  3787. __field(unsigned long long, block_idx)
  3788. __field(unsigned long long, nr_blocks)
  3789. __field(xfs_agnumber_t, agno)
  3790. __field(xfs_agblock_t, agbno)
  3791. __field(unsigned int, nr_records)
  3792. ),
  3793. TP_fast_assign(
  3794. __entry->dev = cur->bc_mp->m_super->s_dev;
  3795. __entry->btnum = cur->bc_btnum;
  3796. __entry->level = level;
  3797. __entry->block_idx = block_idx;
  3798. __entry->nr_blocks = nr_blocks;
  3799. if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
  3800. xfs_fsblock_t fsb = be64_to_cpu(ptr->l);
  3801. __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
  3802. __entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb);
  3803. } else {
  3804. __entry->agno = cur->bc_ag.pag->pag_agno;
  3805. __entry->agbno = be32_to_cpu(ptr->s);
  3806. }
  3807. __entry->nr_records = nr_records;
  3808. ),
  3809. TP_printk("dev %d:%d btree %s level %u block %llu/%llu agno 0x%x agbno 0x%x recs %u",
  3810. MAJOR(__entry->dev), MINOR(__entry->dev),
  3811. __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
  3812. __entry->level,
  3813. __entry->block_idx,
  3814. __entry->nr_blocks,
  3815. __entry->agno,
  3816. __entry->agbno,
  3817. __entry->nr_records)
  3818. )
  3819. DECLARE_EVENT_CLASS(xfs_timestamp_range_class,
  3820. TP_PROTO(struct xfs_mount *mp, time64_t min, time64_t max),
  3821. TP_ARGS(mp, min, max),
  3822. TP_STRUCT__entry(
  3823. __field(dev_t, dev)
  3824. __field(long long, min)
  3825. __field(long long, max)
  3826. ),
  3827. TP_fast_assign(
  3828. __entry->dev = mp->m_super->s_dev;
  3829. __entry->min = min;
  3830. __entry->max = max;
  3831. ),
  3832. TP_printk("dev %d:%d min %lld max %lld",
  3833. MAJOR(__entry->dev), MINOR(__entry->dev),
  3834. __entry->min,
  3835. __entry->max)
  3836. )
  3837. #define DEFINE_TIMESTAMP_RANGE_EVENT(name) \
  3838. DEFINE_EVENT(xfs_timestamp_range_class, name, \
  3839. TP_PROTO(struct xfs_mount *mp, long long min, long long max), \
  3840. TP_ARGS(mp, min, max))
  3841. DEFINE_TIMESTAMP_RANGE_EVENT(xfs_inode_timestamp_range);
  3842. DEFINE_TIMESTAMP_RANGE_EVENT(xfs_quota_expiry_range);
  3843. DECLARE_EVENT_CLASS(xfs_icwalk_class,
  3844. TP_PROTO(struct xfs_mount *mp, struct xfs_icwalk *icw,
  3845. unsigned long caller_ip),
  3846. TP_ARGS(mp, icw, caller_ip),
  3847. TP_STRUCT__entry(
  3848. __field(dev_t, dev)
  3849. __field(__u32, flags)
  3850. __field(uint32_t, uid)
  3851. __field(uint32_t, gid)
  3852. __field(prid_t, prid)
  3853. __field(__u64, min_file_size)
  3854. __field(long, scan_limit)
  3855. __field(unsigned long, caller_ip)
  3856. ),
  3857. TP_fast_assign(
  3858. __entry->dev = mp->m_super->s_dev;
  3859. __entry->flags = icw ? icw->icw_flags : 0;
  3860. __entry->uid = icw ? from_kuid(mp->m_super->s_user_ns,
  3861. icw->icw_uid) : 0;
  3862. __entry->gid = icw ? from_kgid(mp->m_super->s_user_ns,
  3863. icw->icw_gid) : 0;
  3864. __entry->prid = icw ? icw->icw_prid : 0;
  3865. __entry->min_file_size = icw ? icw->icw_min_file_size : 0;
  3866. __entry->scan_limit = icw ? icw->icw_scan_limit : 0;
  3867. __entry->caller_ip = caller_ip;
  3868. ),
  3869. TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu scan_limit %ld caller %pS",
  3870. MAJOR(__entry->dev), MINOR(__entry->dev),
  3871. __entry->flags,
  3872. __entry->uid,
  3873. __entry->gid,
  3874. __entry->prid,
  3875. __entry->min_file_size,
  3876. __entry->scan_limit,
  3877. (char *)__entry->caller_ip)
  3878. );
  3879. #define DEFINE_ICWALK_EVENT(name) \
  3880. DEFINE_EVENT(xfs_icwalk_class, name, \
  3881. TP_PROTO(struct xfs_mount *mp, struct xfs_icwalk *icw, \
  3882. unsigned long caller_ip), \
  3883. TP_ARGS(mp, icw, caller_ip))
  3884. DEFINE_ICWALK_EVENT(xfs_ioc_free_eofblocks);
  3885. DEFINE_ICWALK_EVENT(xfs_blockgc_free_space);
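/*
 * Illustrative sketch, not part of the upstream header: TP_fast_assign()
 * above tolerates a NULL icwalk control structure, so callers can trace
 * their invocation unconditionally whether or not a filter was supplied:
 *
 *	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
 */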

TRACE_DEFINE_ENUM(XLOG_STATE_ACTIVE);
TRACE_DEFINE_ENUM(XLOG_STATE_WANT_SYNC);
TRACE_DEFINE_ENUM(XLOG_STATE_SYNCING);
TRACE_DEFINE_ENUM(XLOG_STATE_DONE_SYNC);
TRACE_DEFINE_ENUM(XLOG_STATE_CALLBACK);
TRACE_DEFINE_ENUM(XLOG_STATE_DIRTY);

DECLARE_EVENT_CLASS(xlog_iclog_class,
	TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip),
	TP_ARGS(iclog, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(uint32_t, state)
		__field(int32_t, refcount)
		__field(uint32_t, offset)
		__field(uint32_t, flags)
		__field(unsigned long long, lsn)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = iclog->ic_log->l_mp->m_super->s_dev;
		__entry->state = iclog->ic_state;
		__entry->refcount = atomic_read(&iclog->ic_refcnt);
		__entry->offset = iclog->ic_offset;
		__entry->flags = iclog->ic_flags;
		__entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->state, XLOG_STATE_STRINGS),
		  __entry->refcount,
		  __entry->offset,
		  __entry->lsn,
		  __print_flags(__entry->flags, "|", XLOG_ICL_STRINGS),
		  (char *)__entry->caller_ip)
);

#define DEFINE_ICLOG_EVENT(name) \
DEFINE_EVENT(xlog_iclog_class, name, \
	TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip), \
	TP_ARGS(iclog, caller_ip))

DEFINE_ICLOG_EVENT(xlog_iclog_activate);
DEFINE_ICLOG_EVENT(xlog_iclog_clean);
DEFINE_ICLOG_EVENT(xlog_iclog_callback);
DEFINE_ICLOG_EVENT(xlog_iclog_callbacks_start);
DEFINE_ICLOG_EVENT(xlog_iclog_callbacks_done);
DEFINE_ICLOG_EVENT(xlog_iclog_force);
DEFINE_ICLOG_EVENT(xlog_iclog_force_lsn);
DEFINE_ICLOG_EVENT(xlog_iclog_get_space);
DEFINE_ICLOG_EVENT(xlog_iclog_release);
DEFINE_ICLOG_EVENT(xlog_iclog_switch);
DEFINE_ICLOG_EVENT(xlog_iclog_sync);
DEFINE_ICLOG_EVENT(xlog_iclog_syncing);
DEFINE_ICLOG_EVENT(xlog_iclog_sync_done);
DEFINE_ICLOG_EVENT(xlog_iclog_want_sync);
DEFINE_ICLOG_EVENT(xlog_iclog_wait_on);
DEFINE_ICLOG_EVENT(xlog_iclog_write);
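
/*
 * Usage sketch (illustrative): each DEFINE_ICLOG_EVENT(name) expands to a
 * trace_<name>(iclog, caller_ip) helper; log code typically passes
 * _RET_IP_ so the "caller" field in the trace output identifies the
 * function that drove the iclog state change, e.g.:
 *
 *	trace_xlog_iclog_want_sync(iclog, _RET_IP_);
 */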

TRACE_DEFINE_ENUM(XFS_DAS_UNINIT);
TRACE_DEFINE_ENUM(XFS_DAS_SF_ADD);
TRACE_DEFINE_ENUM(XFS_DAS_SF_REMOVE);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_ADD);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_ADD);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_SET_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_ALLOC_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REPLACE);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE_OLD);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE_ATTR);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_SET_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_ALLOC_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_REPLACE);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE_OLD);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE_ATTR);
TRACE_DEFINE_ENUM(XFS_DAS_DONE);

DECLARE_EVENT_CLASS(xfs_das_state_class,
	TP_PROTO(int das, struct xfs_inode *ip),
	TP_ARGS(das, ip),
	TP_STRUCT__entry(
		__field(int, das)
		__field(xfs_ino_t, ino)
	),
	TP_fast_assign(
		__entry->das = das;
		__entry->ino = ip->i_ino;
	),
	TP_printk("state change %s ino 0x%llx",
		  __print_symbolic(__entry->das, XFS_DAS_STRINGS),
		  __entry->ino)
)

#define DEFINE_DAS_STATE_EVENT(name) \
DEFINE_EVENT(xfs_das_state_class, name, \
	TP_PROTO(int das, struct xfs_inode *ip), \
	TP_ARGS(das, ip))
DEFINE_DAS_STATE_EVENT(xfs_attr_sf_addname_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_set_iter_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_leaf_addname_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_node_addname_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_remove_iter_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_rmtval_alloc);
DEFINE_DAS_STATE_EVENT(xfs_attr_rmtval_remove_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_defer_add);
DEFINE_DAS_STATE_EVENT(xfs_attr_defer_replace);
DEFINE_DAS_STATE_EVENT(xfs_attr_defer_remove);
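
/*
 * Usage sketch (illustrative, with hypothetical locals): the delayed-attr
 * state machine can report its current XFS_DAS_* state plus the inode it
 * is working on through any DEFINE_DAS_STATE_EVENT() helper above, along
 * the lines of:
 *
 *	trace_xfs_attr_set_iter_return(current_das_state, ip);
 */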

TRACE_EVENT(xfs_force_shutdown,
	TP_PROTO(struct xfs_mount *mp, int ptag, int flags, const char *fname,
		 int line_num),
	TP_ARGS(mp, ptag, flags, fname, line_num),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, ptag)
		__field(int, flags)
		__string(fname, fname)
		__field(int, line_num)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->ptag = ptag;
		__entry->flags = flags;
		__assign_str(fname, fname);
		__entry->line_num = line_num;
	),
	TP_printk("dev %d:%d tag %s flags %s file %s line_num %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_flags(__entry->ptag, "|", XFS_PTAG_STRINGS),
		  __print_flags(__entry->flags, "|", XFS_SHUTDOWN_STRINGS),
		  __get_str(fname),
		  __entry->line_num)
);
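
/*
 * Usage sketch (illustrative): the shutdown path can record which panic
 * tag and shutdown flags triggered the event together with the source
 * location of the caller, for example from a hypothetical wrapper:
 *
 *	trace_xfs_force_shutdown(mp, ptag, flags, __FILE__, __LINE__);
 */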

#endif /* _TRACE_XFS_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE xfs_trace
#include <trace/define_trace.h>