trace.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM nfsd

#if !defined(_NFSD_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _NFSD_TRACE_H

#include <linux/tracepoint.h>

#include "export.h"
#include "nfsfh.h"

#define NFSD_TRACE_PROC_RES_FIELDS \
		__field(unsigned int, netns_ino) \
		__field(u32, xid) \
		__field(unsigned long, status) \
		__array(unsigned char, server, sizeof(struct sockaddr_in6)) \
		__array(unsigned char, client, sizeof(struct sockaddr_in6))

#define NFSD_TRACE_PROC_RES_ASSIGNMENTS(error) \
	do { \
		__entry->netns_ino = SVC_NET(rqstp)->ns.inum; \
		__entry->xid = be32_to_cpu(rqstp->rq_xid); \
		__entry->status = be32_to_cpu(error); \
		memcpy(__entry->server, &rqstp->rq_xprt->xpt_local, \
		       rqstp->rq_xprt->xpt_locallen); \
		memcpy(__entry->client, &rqstp->rq_xprt->xpt_remote, \
		       rqstp->rq_xprt->xpt_remotelen); \
	} while (0);
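
/*
 * The two helper macros above capture the per-request metadata (net
 * namespace inode, XID, status code, and transport endpoints) shared
 * by several error tracepoints in this file.
 *
 * Each TRACE_EVENT()/DEFINE_EVENT() below generates a trace_<name>()
 * function that callers invoke; for example, the XDR error events
 * defined next would be emitted roughly as (illustrative call site):
 *
 *	trace_nfsd_garbage_args_err(rqstp);
 *	trace_nfsd_cant_encode_err(rqstp);
 */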
DECLARE_EVENT_CLASS(nfsd_xdr_err_class,
	TP_PROTO(
		const struct svc_rqst *rqstp
	),
	TP_ARGS(rqstp),
	TP_STRUCT__entry(
		__field(unsigned int, netns_ino)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__sockaddr(server, rqstp->rq_xprt->xpt_locallen)
		__sockaddr(client, rqstp->rq_xprt->xpt_remotelen)
	),
	TP_fast_assign(
		const struct svc_xprt *xprt = rqstp->rq_xprt;
		__entry->netns_ino = xprt->xpt_net->ns.inum;
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->vers = rqstp->rq_vers;
		__entry->proc = rqstp->rq_proc;
		__assign_sockaddr(server, &xprt->xpt_local, xprt->xpt_locallen);
		__assign_sockaddr(client, &xprt->xpt_remote, xprt->xpt_remotelen);
	),
	TP_printk("xid=0x%08x vers=%u proc=%u",
		__entry->xid, __entry->vers, __entry->proc
	)
);

#define DEFINE_NFSD_XDR_ERR_EVENT(name) \
DEFINE_EVENT(nfsd_xdr_err_class, nfsd_##name##_err, \
	TP_PROTO(const struct svc_rqst *rqstp), \
	TP_ARGS(rqstp))

DEFINE_NFSD_XDR_ERR_EVENT(garbage_args);
DEFINE_NFSD_XDR_ERR_EVENT(cant_encode);

#define show_nfsd_may_flags(x) \
	__print_flags(x, "|", \
		{ NFSD_MAY_EXEC, "EXEC" }, \
		{ NFSD_MAY_WRITE, "WRITE" }, \
		{ NFSD_MAY_READ, "READ" }, \
		{ NFSD_MAY_SATTR, "SATTR" }, \
		{ NFSD_MAY_TRUNC, "TRUNC" }, \
		{ NFSD_MAY_LOCK, "LOCK" }, \
		{ NFSD_MAY_OWNER_OVERRIDE, "OWNER_OVERRIDE" }, \
		{ NFSD_MAY_LOCAL_ACCESS, "LOCAL_ACCESS" }, \
		{ NFSD_MAY_BYPASS_GSS_ON_ROOT, "BYPASS_GSS_ON_ROOT" }, \
		{ NFSD_MAY_NOT_BREAK_LEASE, "NOT_BREAK_LEASE" }, \
		{ NFSD_MAY_BYPASS_GSS, "BYPASS_GSS" }, \
		{ NFSD_MAY_READ_IF_EXEC, "READ_IF_EXEC" }, \
		{ NFSD_MAY_64BIT_COOKIE, "64BIT_COOKIE" })
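
/*
 * NFSv4 COMPOUND processing.  nfsd_compound records an incoming
 * COMPOUND, and nfsd_compound_status records the result of each
 * operation within it.  An illustrative (not verbatim) call site:
 *
 *	trace_nfsd_compound(rqst, tag, taglen, args->opcnt);
 *	...
 *	trace_nfsd_compound_status(args->opcnt, resp->opcnt,
 *				   status, name);
 */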
TRACE_EVENT(nfsd_compound,
	TP_PROTO(
		const struct svc_rqst *rqst,
		const char *tag,
		u32 taglen,
		u32 opcnt
	),
	TP_ARGS(rqst, tag, taglen, opcnt),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, opcnt)
		__string_len(tag, tag, taglen)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->opcnt = opcnt;
		__assign_str_len(tag, tag, taglen);
	),
	TP_printk("xid=0x%08x opcnt=%u tag=%s",
		__entry->xid, __entry->opcnt, __get_str(tag)
	)
)

TRACE_EVENT(nfsd_compound_status,
	TP_PROTO(u32 args_opcnt,
		 u32 resp_opcnt,
		 __be32 status,
		 const char *name),
	TP_ARGS(args_opcnt, resp_opcnt, status, name),
	TP_STRUCT__entry(
		__field(u32, args_opcnt)
		__field(u32, resp_opcnt)
		__field(int, status)
		__string(name, name)
	),
	TP_fast_assign(
		__entry->args_opcnt = args_opcnt;
		__entry->resp_opcnt = resp_opcnt;
		__entry->status = be32_to_cpu(status);
		__assign_str(name, name);
	),
	TP_printk("op=%u/%u %s status=%d",
		__entry->resp_opcnt, __entry->args_opcnt,
		__get_str(name), __entry->status)
)

TRACE_EVENT(nfsd_compound_decode_err,
	TP_PROTO(
		const struct svc_rqst *rqstp,
		u32 args_opcnt,
		u32 resp_opcnt,
		u32 opnum,
		__be32 status
	),
	TP_ARGS(rqstp, args_opcnt, resp_opcnt, opnum, status),
	TP_STRUCT__entry(
		NFSD_TRACE_PROC_RES_FIELDS
		__field(u32, args_opcnt)
		__field(u32, resp_opcnt)
		__field(u32, opnum)
	),
	TP_fast_assign(
		NFSD_TRACE_PROC_RES_ASSIGNMENTS(status)
		__entry->args_opcnt = args_opcnt;
		__entry->resp_opcnt = resp_opcnt;
		__entry->opnum = opnum;
	),
	TP_printk("op=%u/%u opnum=%u status=%lu",
		__entry->resp_opcnt, __entry->args_opcnt,
		__entry->opnum, __entry->status)
);

TRACE_EVENT(nfsd_compound_encode_err,
	TP_PROTO(
		const struct svc_rqst *rqstp,
		u32 opnum,
		__be32 status
	),
	TP_ARGS(rqstp, opnum, status),
	TP_STRUCT__entry(
		NFSD_TRACE_PROC_RES_FIELDS
		__field(u32, opnum)
	),
	TP_fast_assign(
		NFSD_TRACE_PROC_RES_ASSIGNMENTS(status)
		__entry->opnum = opnum;
	),
	TP_printk("opnum=%u status=%lu",
		__entry->opnum, __entry->status)
);

#define show_fs_file_type(x) \
	__print_symbolic(x, \
		{ S_IFLNK, "LNK" }, \
		{ S_IFREG, "REG" }, \
		{ S_IFDIR, "DIR" }, \
		{ S_IFCHR, "CHR" }, \
		{ S_IFBLK, "BLK" }, \
		{ S_IFIFO, "FIFO" }, \
		{ S_IFSOCK, "SOCK" })
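
/*
 * File handle verification results.  nfsd_fh_verify_err is declared
 * with TP_CONDITION(error), so it fires only when a non-zero status
 * is being returned; nfsd_fh_verify covers the unconditional case.
 */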
TRACE_EVENT(nfsd_fh_verify,
	TP_PROTO(
		const struct svc_rqst *rqstp,
		const struct svc_fh *fhp,
		umode_t type,
		int access
	),
	TP_ARGS(rqstp, fhp, type, access),
	TP_STRUCT__entry(
		__field(unsigned int, netns_ino)
		__sockaddr(server, rqstp->rq_xprt->xpt_remotelen)
		__sockaddr(client, rqstp->rq_xprt->xpt_remotelen)
		__field(u32, xid)
		__field(u32, fh_hash)
		__field(const void *, inode)
		__field(unsigned long, type)
		__field(unsigned long, access)
	),
	TP_fast_assign(
		__entry->netns_ino = SVC_NET(rqstp)->ns.inum;
		__assign_sockaddr(server, &rqstp->rq_xprt->xpt_local,
				  rqstp->rq_xprt->xpt_locallen);
		__assign_sockaddr(client, &rqstp->rq_xprt->xpt_remote,
				  rqstp->rq_xprt->xpt_remotelen);
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
		__entry->inode = d_inode(fhp->fh_dentry);
		__entry->type = type;
		__entry->access = access;
	),
	TP_printk("xid=0x%08x fh_hash=0x%08x type=%s access=%s",
		__entry->xid, __entry->fh_hash,
		show_fs_file_type(__entry->type),
		show_nfsd_may_flags(__entry->access)
	)
);

TRACE_EVENT_CONDITION(nfsd_fh_verify_err,
	TP_PROTO(
		const struct svc_rqst *rqstp,
		const struct svc_fh *fhp,
		umode_t type,
		int access,
		__be32 error
	),
	TP_ARGS(rqstp, fhp, type, access, error),
	TP_CONDITION(error),
	TP_STRUCT__entry(
		__field(unsigned int, netns_ino)
		__sockaddr(server, rqstp->rq_xprt->xpt_remotelen)
		__sockaddr(client, rqstp->rq_xprt->xpt_remotelen)
		__field(u32, xid)
		__field(u32, fh_hash)
		__field(const void *, inode)
		__field(unsigned long, type)
		__field(unsigned long, access)
		__field(int, error)
	),
	TP_fast_assign(
		__entry->netns_ino = SVC_NET(rqstp)->ns.inum;
		__assign_sockaddr(server, &rqstp->rq_xprt->xpt_local,
				  rqstp->rq_xprt->xpt_locallen);
		__assign_sockaddr(client, &rqstp->rq_xprt->xpt_remote,
				  rqstp->rq_xprt->xpt_remotelen);
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
		if (fhp->fh_dentry)
			__entry->inode = d_inode(fhp->fh_dentry);
		else
			__entry->inode = NULL;
		__entry->type = type;
		__entry->access = access;
		__entry->error = be32_to_cpu(error);
	),
	TP_printk("xid=0x%08x fh_hash=0x%08x type=%s access=%s error=%d",
		__entry->xid, __entry->fh_hash,
		show_fs_file_type(__entry->type),
		show_nfsd_may_flags(__entry->access),
		__entry->error
	)
);

DECLARE_EVENT_CLASS(nfsd_fh_err_class,
	TP_PROTO(struct svc_rqst *rqstp,
		 struct svc_fh *fhp,
		 int status),
	TP_ARGS(rqstp, fhp, status),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, fh_hash)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
		__entry->status = status;
	),
	TP_printk("xid=0x%08x fh_hash=0x%08x status=%d",
		__entry->xid, __entry->fh_hash,
		__entry->status)
)

#define DEFINE_NFSD_FH_ERR_EVENT(name) \
DEFINE_EVENT(nfsd_fh_err_class, nfsd_##name, \
	TP_PROTO(struct svc_rqst *rqstp, \
		 struct svc_fh *fhp, \
		 int status), \
	TP_ARGS(rqstp, fhp, status))

DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badexport);
DEFINE_NFSD_FH_ERR_EVENT(set_fh_dentry_badhandle);
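
/*
 * Export and export-key cache events.  The fsid is copied as six
 * 32-bit words (4*6 bytes, matching ek_fsid) and rendered with
 * __print_array(); "cache=pos/neg" reports whether the cache entry
 * is positive or negative (CACHE_NEGATIVE).
 */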
TRACE_EVENT(nfsd_exp_find_key,
	TP_PROTO(const struct svc_expkey *key,
		 int status),
	TP_ARGS(key, status),
	TP_STRUCT__entry(
		__field(int, fsidtype)
		__array(u32, fsid, 6)
		__string(auth_domain, key->ek_client->name)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->fsidtype = key->ek_fsidtype;
		memcpy(__entry->fsid, key->ek_fsid, 4*6);
		__assign_str(auth_domain, key->ek_client->name);
		__entry->status = status;
	),
	TP_printk("fsid=%x::%s domain=%s status=%d",
		__entry->fsidtype,
		__print_array(__entry->fsid, 6, 4),
		__get_str(auth_domain),
		__entry->status
	)
);

TRACE_EVENT(nfsd_expkey_update,
	TP_PROTO(const struct svc_expkey *key, const char *exp_path),
	TP_ARGS(key, exp_path),
	TP_STRUCT__entry(
		__field(int, fsidtype)
		__array(u32, fsid, 6)
		__string(auth_domain, key->ek_client->name)
		__string(path, exp_path)
		__field(bool, cache)
	),
	TP_fast_assign(
		__entry->fsidtype = key->ek_fsidtype;
		memcpy(__entry->fsid, key->ek_fsid, 4*6);
		__assign_str(auth_domain, key->ek_client->name);
		__assign_str(path, exp_path);
		__entry->cache = !test_bit(CACHE_NEGATIVE, &key->h.flags);
	),
	TP_printk("fsid=%x::%s domain=%s path=%s cache=%s",
		__entry->fsidtype,
		__print_array(__entry->fsid, 6, 4),
		__get_str(auth_domain),
		__get_str(path),
		__entry->cache ? "pos" : "neg"
	)
);

TRACE_EVENT(nfsd_exp_get_by_name,
	TP_PROTO(const struct svc_export *key,
		 int status),
	TP_ARGS(key, status),
	TP_STRUCT__entry(
		__string(path, key->ex_path.dentry->d_name.name)
		__string(auth_domain, key->ex_client->name)
		__field(int, status)
	),
	TP_fast_assign(
		__assign_str(path, key->ex_path.dentry->d_name.name);
		__assign_str(auth_domain, key->ex_client->name);
		__entry->status = status;
	),
	TP_printk("path=%s domain=%s status=%d",
		__get_str(path),
		__get_str(auth_domain),
		__entry->status
	)
);

TRACE_EVENT(nfsd_export_update,
	TP_PROTO(const struct svc_export *key),
	TP_ARGS(key),
	TP_STRUCT__entry(
		__string(path, key->ex_path.dentry->d_name.name)
		__string(auth_domain, key->ex_client->name)
		__field(bool, cache)
	),
	TP_fast_assign(
		__assign_str(path, key->ex_path.dentry->d_name.name);
		__assign_str(auth_domain, key->ex_client->name);
		__entry->cache = !test_bit(CACHE_NEGATIVE, &key->h.flags);
	),
	TP_printk("path=%s domain=%s cache=%s",
		__get_str(path),
		__get_str(auth_domain),
		__entry->cache ? "pos" : "neg"
	)
);
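
/*
 * READ/WRITE progress events.  Each DEFINE_NFSD_IO_EVENT() below
 * generates a trace_nfsd_<name>() tracepoint taking
 * (rqstp, fhp, offset, len); an illustrative call from the read path
 * might look like:
 *
 *	trace_nfsd_read_start(rqstp, fhp, offset, *count);
 */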
DECLARE_EVENT_CLASS(nfsd_io_class,
	TP_PROTO(struct svc_rqst *rqstp,
		 struct svc_fh *fhp,
		 u64 offset,
		 u32 len),
	TP_ARGS(rqstp, fhp, offset, len),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, fh_hash)
		__field(u64, offset)
		__field(u32, len)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
		__entry->offset = offset;
		__entry->len = len;
	),
	TP_printk("xid=0x%08x fh_hash=0x%08x offset=%llu len=%u",
		__entry->xid, __entry->fh_hash,
		__entry->offset, __entry->len)
)

#define DEFINE_NFSD_IO_EVENT(name) \
DEFINE_EVENT(nfsd_io_class, nfsd_##name, \
	TP_PROTO(struct svc_rqst *rqstp, \
		 struct svc_fh *fhp, \
		 u64 offset, \
		 u32 len), \
	TP_ARGS(rqstp, fhp, offset, len))

DEFINE_NFSD_IO_EVENT(read_start);
DEFINE_NFSD_IO_EVENT(read_splice);
DEFINE_NFSD_IO_EVENT(read_vector);
DEFINE_NFSD_IO_EVENT(read_io_done);
DEFINE_NFSD_IO_EVENT(read_done);
DEFINE_NFSD_IO_EVENT(write_start);
DEFINE_NFSD_IO_EVENT(write_opened);
DEFINE_NFSD_IO_EVENT(write_io_done);
DEFINE_NFSD_IO_EVENT(write_done);

DECLARE_EVENT_CLASS(nfsd_err_class,
	TP_PROTO(struct svc_rqst *rqstp,
		 struct svc_fh *fhp,
		 loff_t offset,
		 int status),
	TP_ARGS(rqstp, fhp, offset, status),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, fh_hash)
		__field(loff_t, offset)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
		__entry->offset = offset;
		__entry->status = status;
	),
	TP_printk("xid=0x%08x fh_hash=0x%08x offset=%lld status=%d",
		__entry->xid, __entry->fh_hash,
		__entry->offset, __entry->status)
)

#define DEFINE_NFSD_ERR_EVENT(name) \
DEFINE_EVENT(nfsd_err_class, nfsd_##name, \
	TP_PROTO(struct svc_rqst *rqstp, \
		 struct svc_fh *fhp, \
		 loff_t offset, \
		 int len), \
	TP_ARGS(rqstp, fhp, offset, len))

DEFINE_NFSD_ERR_EVENT(read_err);
DEFINE_NFSD_ERR_EVENT(write_err);

TRACE_EVENT(nfsd_dirent,
	TP_PROTO(struct svc_fh *fhp,
		 u64 ino,
		 const char *name,
		 int namlen),
	TP_ARGS(fhp, ino, name, namlen),
	TP_STRUCT__entry(
		__field(u32, fh_hash)
		__field(u64, ino)
		__string_len(name, name, namlen)
	),
	TP_fast_assign(
		__entry->fh_hash = fhp ? knfsd_fh_hash(&fhp->fh_handle) : 0;
		__entry->ino = ino;
		__assign_str_len(name, name, namlen)
	),
	TP_printk("fh_hash=0x%08x ino=%llu name=%s",
		__entry->fh_hash, __entry->ino, __get_str(name)
	)
)
DECLARE_EVENT_CLASS(nfsd_copy_err_class,
	TP_PROTO(struct svc_rqst *rqstp,
		 struct svc_fh *src_fhp,
		 loff_t src_offset,
		 struct svc_fh *dst_fhp,
		 loff_t dst_offset,
		 u64 count,
		 int status),
	TP_ARGS(rqstp, src_fhp, src_offset, dst_fhp, dst_offset, count, status),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, src_fh_hash)
		__field(loff_t, src_offset)
		__field(u32, dst_fh_hash)
		__field(loff_t, dst_offset)
		__field(u64, count)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->src_fh_hash = knfsd_fh_hash(&src_fhp->fh_handle);
		__entry->src_offset = src_offset;
		__entry->dst_fh_hash = knfsd_fh_hash(&dst_fhp->fh_handle);
		__entry->dst_offset = dst_offset;
		__entry->count = count;
		__entry->status = status;
	),
	TP_printk("xid=0x%08x src_fh_hash=0x%08x src_offset=%lld "
		"dst_fh_hash=0x%08x dst_offset=%lld "
		"count=%llu status=%d",
		__entry->xid, __entry->src_fh_hash, __entry->src_offset,
		__entry->dst_fh_hash, __entry->dst_offset,
		(unsigned long long)__entry->count,
		__entry->status)
)

#define DEFINE_NFSD_COPY_ERR_EVENT(name) \
DEFINE_EVENT(nfsd_copy_err_class, nfsd_##name, \
	TP_PROTO(struct svc_rqst *rqstp, \
		 struct svc_fh *src_fhp, \
		 loff_t src_offset, \
		 struct svc_fh *dst_fhp, \
		 loff_t dst_offset, \
		 u64 count, \
		 int status), \
	TP_ARGS(rqstp, src_fhp, src_offset, dst_fhp, dst_offset, \
		count, status))

DEFINE_NFSD_COPY_ERR_EVENT(clone_file_range_err);
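
/*
 * state.h, filecache.h, and vfs.h supply the nfs4_client, nfsd_file,
 * stateid_t, and related types used only by the tracepoints that
 * follow, which is why they are included here rather than at the top
 * of the file.
 */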
#include "state.h"
#include "filecache.h"
#include "vfs.h"

TRACE_EVENT(nfsd_delegret_wakeup,
	TP_PROTO(
		const struct svc_rqst *rqstp,
		const struct inode *inode,
		long timeo
	),
	TP_ARGS(rqstp, inode, timeo),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(const void *, inode)
		__field(long, timeo)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->inode = inode;
		__entry->timeo = timeo;
	),
	TP_printk("xid=0x%08x inode=%p%s",
		__entry->xid, __entry->inode,
		__entry->timeo == 0 ? " (timed out)" : ""
	)
);
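
/*
 * Stateid life-cycle events.  Each DEFINE_STATEID_EVENT() generates a
 * trace_nfsd_<name>() taking a stateid_t pointer and printing the
 * client "boot:id" and stateid "id:generation" pairs, e.g.
 * (illustrative call site, field names depend on the caller):
 *
 *	trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
 */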
DECLARE_EVENT_CLASS(nfsd_stateid_class,
	TP_PROTO(stateid_t *stp),
	TP_ARGS(stp),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__field(u32, si_id)
		__field(u32, si_generation)
	),
	TP_fast_assign(
		__entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
		__entry->cl_id = stp->si_opaque.so_clid.cl_id;
		__entry->si_id = stp->si_opaque.so_id;
		__entry->si_generation = stp->si_generation;
	),
	TP_printk("client %08x:%08x stateid %08x:%08x",
		__entry->cl_boot,
		__entry->cl_id,
		__entry->si_id,
		__entry->si_generation)
)

#define DEFINE_STATEID_EVENT(name) \
DEFINE_EVENT(nfsd_stateid_class, nfsd_##name, \
	TP_PROTO(stateid_t *stp), \
	TP_ARGS(stp))

DEFINE_STATEID_EVENT(layoutstate_alloc);
DEFINE_STATEID_EVENT(layoutstate_unhash);
DEFINE_STATEID_EVENT(layoutstate_free);
DEFINE_STATEID_EVENT(layout_get_lookup_fail);
DEFINE_STATEID_EVENT(layout_commit_lookup_fail);
DEFINE_STATEID_EVENT(layout_return_lookup_fail);
DEFINE_STATEID_EVENT(layout_recall);
DEFINE_STATEID_EVENT(layout_recall_done);
DEFINE_STATEID_EVENT(layout_recall_fail);
DEFINE_STATEID_EVENT(layout_recall_release);
DEFINE_STATEID_EVENT(open);
DEFINE_STATEID_EVENT(deleg_read);
DEFINE_STATEID_EVENT(deleg_recall);

DECLARE_EVENT_CLASS(nfsd_stateseqid_class,
	TP_PROTO(u32 seqid, const stateid_t *stp),
	TP_ARGS(seqid, stp),
	TP_STRUCT__entry(
		__field(u32, seqid)
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__field(u32, si_id)
		__field(u32, si_generation)
	),
	TP_fast_assign(
		__entry->seqid = seqid;
		__entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
		__entry->cl_id = stp->si_opaque.so_clid.cl_id;
		__entry->si_id = stp->si_opaque.so_id;
		__entry->si_generation = stp->si_generation;
	),
	TP_printk("seqid=%u client %08x:%08x stateid %08x:%08x",
		__entry->seqid, __entry->cl_boot, __entry->cl_id,
		__entry->si_id, __entry->si_generation)
)

#define DEFINE_STATESEQID_EVENT(name) \
DEFINE_EVENT(nfsd_stateseqid_class, nfsd_##name, \
	TP_PROTO(u32 seqid, const stateid_t *stp), \
	TP_ARGS(seqid, stp))

DEFINE_STATESEQID_EVENT(preprocess);
DEFINE_STATESEQID_EVENT(open_confirm);

DECLARE_EVENT_CLASS(nfsd_clientid_class,
	TP_PROTO(const clientid_t *clid),
	TP_ARGS(clid),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
	),
	TP_fast_assign(
		__entry->cl_boot = clid->cl_boot;
		__entry->cl_id = clid->cl_id;
	),
	TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id)
)

#define DEFINE_CLIENTID_EVENT(name) \
DEFINE_EVENT(nfsd_clientid_class, nfsd_clid_##name, \
	TP_PROTO(const clientid_t *clid), \
	TP_ARGS(clid))

DEFINE_CLIENTID_EVENT(expire_unconf);
DEFINE_CLIENTID_EVENT(reclaim_complete);
DEFINE_CLIENTID_EVENT(confirmed);
DEFINE_CLIENTID_EVENT(destroyed);
DEFINE_CLIENTID_EVENT(admin_expired);
DEFINE_CLIENTID_EVENT(replaced);
DEFINE_CLIENTID_EVENT(purged);
DEFINE_CLIENTID_EVENT(renew);
DEFINE_CLIENTID_EVENT(stale);
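
/*
 * Per-network-namespace events: grace period start/completion and
 * write-verifier resets, all keyed on the nfsd_net structure.
 */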
DECLARE_EVENT_CLASS(nfsd_net_class,
	TP_PROTO(const struct nfsd_net *nn),
	TP_ARGS(nn),
	TP_STRUCT__entry(
		__field(unsigned long long, boot_time)
	),
	TP_fast_assign(
		__entry->boot_time = nn->boot_time;
	),
	TP_printk("boot_time=%16llx", __entry->boot_time)
)

#define DEFINE_NET_EVENT(name) \
DEFINE_EVENT(nfsd_net_class, nfsd_##name, \
	TP_PROTO(const struct nfsd_net *nn), \
	TP_ARGS(nn))

DEFINE_NET_EVENT(grace_start);
DEFINE_NET_EVENT(grace_complete);

TRACE_EVENT(nfsd_writeverf_reset,
	TP_PROTO(
		const struct nfsd_net *nn,
		const struct svc_rqst *rqstp,
		int error
	),
	TP_ARGS(nn, rqstp, error),
	TP_STRUCT__entry(
		__field(unsigned long long, boot_time)
		__field(u32, xid)
		__field(int, error)
		__array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
	),
	TP_fast_assign(
		__entry->boot_time = nn->boot_time;
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->error = error;
		/* avoid seqlock inside TP_fast_assign */
		memcpy(__entry->verifier, nn->writeverf,
		       NFS4_VERIFIER_SIZE);
	),
	TP_printk("boot_time=%16llx xid=0x%08x error=%d new verifier=0x%s",
		__entry->boot_time, __entry->xid, __entry->error,
		__print_hex_str(__entry->verifier, NFS4_VERIFIER_SIZE)
	)
);

TRACE_EVENT(nfsd_clid_cred_mismatch,
	TP_PROTO(
		const struct nfs4_client *clp,
		const struct svc_rqst *rqstp
	),
	TP_ARGS(clp, rqstp),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__field(unsigned long, cl_flavor)
		__field(unsigned long, new_flavor)
		__sockaddr(addr, rqstp->rq_xprt->xpt_remotelen)
	),
	TP_fast_assign(
		__entry->cl_boot = clp->cl_clientid.cl_boot;
		__entry->cl_id = clp->cl_clientid.cl_id;
		__entry->cl_flavor = clp->cl_cred.cr_flavor;
		__entry->new_flavor = rqstp->rq_cred.cr_flavor;
		__assign_sockaddr(addr, &rqstp->rq_xprt->xpt_remote,
				  rqstp->rq_xprt->xpt_remotelen);
	),
	TP_printk("client %08x:%08x flavor=%s, conflict=%s from addr=%pISpc",
		__entry->cl_boot, __entry->cl_id,
		show_nfsd_authflavor(__entry->cl_flavor),
		show_nfsd_authflavor(__entry->new_flavor),
		__get_sockaddr(addr)
	)
)

TRACE_EVENT(nfsd_clid_verf_mismatch,
	TP_PROTO(
		const struct nfs4_client *clp,
		const struct svc_rqst *rqstp,
		const nfs4_verifier *verf
	),
	TP_ARGS(clp, rqstp, verf),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__array(unsigned char, cl_verifier, NFS4_VERIFIER_SIZE)
		__array(unsigned char, new_verifier, NFS4_VERIFIER_SIZE)
		__sockaddr(addr, rqstp->rq_xprt->xpt_remotelen)
	),
	TP_fast_assign(
		__entry->cl_boot = clp->cl_clientid.cl_boot;
		__entry->cl_id = clp->cl_clientid.cl_id;
		memcpy(__entry->cl_verifier, (void *)&clp->cl_verifier,
		       NFS4_VERIFIER_SIZE);
		memcpy(__entry->new_verifier, (void *)verf,
		       NFS4_VERIFIER_SIZE);
		__assign_sockaddr(addr, &rqstp->rq_xprt->xpt_remote,
				  rqstp->rq_xprt->xpt_remotelen);
	),
	TP_printk("client %08x:%08x verf=0x%s, updated=0x%s from addr=%pISpc",
		__entry->cl_boot, __entry->cl_id,
		__print_hex_str(__entry->cl_verifier, NFS4_VERIFIER_SIZE),
		__print_hex_str(__entry->new_verifier, NFS4_VERIFIER_SIZE),
		__get_sockaddr(addr)
	)
);
DECLARE_EVENT_CLASS(nfsd_clid_class,
	TP_PROTO(const struct nfs4_client *clp),
	TP_ARGS(clp),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__array(unsigned char, addr, sizeof(struct sockaddr_in6))
		__field(unsigned long, flavor)
		__array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
		__string_len(name, name, clp->cl_name.len)
	),
	TP_fast_assign(
		__entry->cl_boot = clp->cl_clientid.cl_boot;
		__entry->cl_id = clp->cl_clientid.cl_id;
		memcpy(__entry->addr, &clp->cl_addr,
		       sizeof(struct sockaddr_in6));
		__entry->flavor = clp->cl_cred.cr_flavor;
		memcpy(__entry->verifier, (void *)&clp->cl_verifier,
		       NFS4_VERIFIER_SIZE);
		__assign_str_len(name, clp->cl_name.data, clp->cl_name.len);
	),
	TP_printk("addr=%pISpc name='%s' verifier=0x%s flavor=%s client=%08x:%08x",
		__entry->addr, __get_str(name),
		__print_hex_str(__entry->verifier, NFS4_VERIFIER_SIZE),
		show_nfsd_authflavor(__entry->flavor),
		__entry->cl_boot, __entry->cl_id)
);

#define DEFINE_CLID_EVENT(name) \
DEFINE_EVENT(nfsd_clid_class, nfsd_clid_##name, \
	TP_PROTO(const struct nfs4_client *clp), \
	TP_ARGS(clp))

DEFINE_CLID_EVENT(fresh);
DEFINE_CLID_EVENT(confirmed_r);

/*
 * from fs/nfsd/filecache.h
 */
#define show_nf_flags(val) \
	__print_flags(val, "|", \
		{ 1 << NFSD_FILE_HASHED, "HASHED" }, \
		{ 1 << NFSD_FILE_PENDING, "PENDING" }, \
		{ 1 << NFSD_FILE_REFERENCED, "REFERENCED" }, \
		{ 1 << NFSD_FILE_GC, "GC" })
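
/*
 * File cache (nfsd_file) events.  The inode and file pointers these
 * events record are captured for identification only and, as noted
 * in nfsd_file_open_class below, must not be dereferenced.
 */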
DECLARE_EVENT_CLASS(nfsd_file_class,
	TP_PROTO(struct nfsd_file *nf),
	TP_ARGS(nf),
	TP_STRUCT__entry(
		__field(void *, nf_inode)
		__field(int, nf_ref)
		__field(unsigned long, nf_flags)
		__field(unsigned char, nf_may)
		__field(struct file *, nf_file)
	),
	TP_fast_assign(
		__entry->nf_inode = nf->nf_inode;
		__entry->nf_ref = refcount_read(&nf->nf_ref);
		__entry->nf_flags = nf->nf_flags;
		__entry->nf_may = nf->nf_may;
		__entry->nf_file = nf->nf_file;
	),
	TP_printk("inode=%p ref=%d flags=%s may=%s nf_file=%p",
		__entry->nf_inode,
		__entry->nf_ref,
		show_nf_flags(__entry->nf_flags),
		show_nfsd_may_flags(__entry->nf_may),
		__entry->nf_file)
)

#define DEFINE_NFSD_FILE_EVENT(name) \
DEFINE_EVENT(nfsd_file_class, name, \
	TP_PROTO(struct nfsd_file *nf), \
	TP_ARGS(nf))

DEFINE_NFSD_FILE_EVENT(nfsd_file_free);
DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash);
DEFINE_NFSD_FILE_EVENT(nfsd_file_put);
DEFINE_NFSD_FILE_EVENT(nfsd_file_closing);
DEFINE_NFSD_FILE_EVENT(nfsd_file_unhash_and_queue);

TRACE_EVENT(nfsd_file_alloc,
	TP_PROTO(
		const struct nfsd_file *nf
	),
	TP_ARGS(nf),
	TP_STRUCT__entry(
		__field(const void *, nf_inode)
		__field(unsigned long, nf_flags)
		__field(unsigned long, nf_may)
		__field(unsigned int, nf_ref)
	),
	TP_fast_assign(
		__entry->nf_inode = nf->nf_inode;
		__entry->nf_flags = nf->nf_flags;
		__entry->nf_ref = refcount_read(&nf->nf_ref);
		__entry->nf_may = nf->nf_may;
	),
	TP_printk("inode=%p ref=%u flags=%s may=%s",
		__entry->nf_inode, __entry->nf_ref,
		show_nf_flags(__entry->nf_flags),
		show_nfsd_may_flags(__entry->nf_may)
	)
);

TRACE_EVENT(nfsd_file_acquire,
	TP_PROTO(
		const struct svc_rqst *rqstp,
		const struct inode *inode,
		unsigned int may_flags,
		const struct nfsd_file *nf,
		__be32 status
	),
	TP_ARGS(rqstp, inode, may_flags, nf, status),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(const void *, inode)
		__field(unsigned long, may_flags)
		__field(unsigned int, nf_ref)
		__field(unsigned long, nf_flags)
		__field(unsigned long, nf_may)
		__field(const void *, nf_file)
		__field(u32, status)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->inode = inode;
		__entry->may_flags = may_flags;
		__entry->nf_ref = nf ? refcount_read(&nf->nf_ref) : 0;
		__entry->nf_flags = nf ? nf->nf_flags : 0;
		__entry->nf_may = nf ? nf->nf_may : 0;
		__entry->nf_file = nf ? nf->nf_file : NULL;
		__entry->status = be32_to_cpu(status);
	),
	TP_printk("xid=0x%x inode=%p may_flags=%s ref=%u nf_flags=%s nf_may=%s nf_file=%p status=%u",
		__entry->xid, __entry->inode,
		show_nfsd_may_flags(__entry->may_flags),
		__entry->nf_ref, show_nf_flags(__entry->nf_flags),
		show_nfsd_may_flags(__entry->nf_may),
		__entry->nf_file, __entry->status
	)
);

TRACE_EVENT(nfsd_file_insert_err,
	TP_PROTO(
		const struct svc_rqst *rqstp,
		const struct inode *inode,
		unsigned int may_flags,
		long error
	),
	TP_ARGS(rqstp, inode, may_flags, error),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(const void *, inode)
		__field(unsigned long, may_flags)
		__field(long, error)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->inode = inode;
		__entry->may_flags = may_flags;
		__entry->error = error;
	),
	TP_printk("xid=0x%x inode=%p may_flags=%s error=%ld",
		__entry->xid, __entry->inode,
		show_nfsd_may_flags(__entry->may_flags),
		__entry->error
	)
);

TRACE_EVENT(nfsd_file_cons_err,
	TP_PROTO(
		const struct svc_rqst *rqstp,
		const struct inode *inode,
		unsigned int may_flags,
		const struct nfsd_file *nf
	),
	TP_ARGS(rqstp, inode, may_flags, nf),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(const void *, inode)
		__field(unsigned long, may_flags)
		__field(unsigned int, nf_ref)
		__field(unsigned long, nf_flags)
		__field(unsigned long, nf_may)
		__field(const void *, nf_file)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
		__entry->inode = inode;
		__entry->may_flags = may_flags;
		__entry->nf_ref = refcount_read(&nf->nf_ref);
		__entry->nf_flags = nf->nf_flags;
		__entry->nf_may = nf->nf_may;
		__entry->nf_file = nf->nf_file;
	),
	TP_printk("xid=0x%x inode=%p may_flags=%s ref=%u nf_flags=%s nf_may=%s nf_file=%p",
		__entry->xid, __entry->inode,
		show_nfsd_may_flags(__entry->may_flags), __entry->nf_ref,
		show_nf_flags(__entry->nf_flags),
		show_nfsd_may_flags(__entry->nf_may), __entry->nf_file
	)
);
DECLARE_EVENT_CLASS(nfsd_file_open_class,
	TP_PROTO(const struct nfsd_file *nf, __be32 status),
	TP_ARGS(nf, status),
	TP_STRUCT__entry(
		__field(void *, nf_inode)	/* cannot be dereferenced */
		__field(int, nf_ref)
		__field(unsigned long, nf_flags)
		__field(unsigned long, nf_may)
		__field(void *, nf_file)	/* cannot be dereferenced */
	),
	TP_fast_assign(
		__entry->nf_inode = nf->nf_inode;
		__entry->nf_ref = refcount_read(&nf->nf_ref);
		__entry->nf_flags = nf->nf_flags;
		__entry->nf_may = nf->nf_may;
		__entry->nf_file = nf->nf_file;
	),
	TP_printk("inode=%p ref=%d flags=%s may=%s file=%p",
		__entry->nf_inode,
		__entry->nf_ref,
		show_nf_flags(__entry->nf_flags),
		show_nfsd_may_flags(__entry->nf_may),
		__entry->nf_file)
)

#define DEFINE_NFSD_FILE_OPEN_EVENT(name) \
DEFINE_EVENT(nfsd_file_open_class, name, \
	TP_PROTO( \
		const struct nfsd_file *nf, \
		__be32 status \
	), \
	TP_ARGS(nf, status))

DEFINE_NFSD_FILE_OPEN_EVENT(nfsd_file_open);
DEFINE_NFSD_FILE_OPEN_EVENT(nfsd_file_opened);

TRACE_EVENT(nfsd_file_is_cached,
	TP_PROTO(
		const struct inode *inode,
		int found
	),
	TP_ARGS(inode, found),
	TP_STRUCT__entry(
		__field(const struct inode *, inode)
		__field(int, found)
	),
	TP_fast_assign(
		__entry->inode = inode;
		__entry->found = found;
	),
	TP_printk("inode=%p is %scached",
		__entry->inode,
		__entry->found ? "" : "not "
	)
);

TRACE_EVENT(nfsd_file_fsnotify_handle_event,
	TP_PROTO(struct inode *inode, u32 mask),
	TP_ARGS(inode, mask),
	TP_STRUCT__entry(
		__field(struct inode *, inode)
		__field(unsigned int, nlink)
		__field(umode_t, mode)
		__field(u32, mask)
	),
	TP_fast_assign(
		__entry->inode = inode;
		__entry->nlink = inode->i_nlink;
		__entry->mode = inode->i_mode;
		__entry->mask = mask;
	),
	TP_printk("inode=%p nlink=%u mode=0%ho mask=0x%x", __entry->inode,
		__entry->nlink, __entry->mode, __entry->mask)
);

DECLARE_EVENT_CLASS(nfsd_file_gc_class,
	TP_PROTO(
		const struct nfsd_file *nf
	),
	TP_ARGS(nf),
	TP_STRUCT__entry(
		__field(void *, nf_inode)
		__field(void *, nf_file)
		__field(int, nf_ref)
		__field(unsigned long, nf_flags)
	),
	TP_fast_assign(
		__entry->nf_inode = nf->nf_inode;
		__entry->nf_file = nf->nf_file;
		__entry->nf_ref = refcount_read(&nf->nf_ref);
		__entry->nf_flags = nf->nf_flags;
	),
	TP_printk("inode=%p ref=%d nf_flags=%s nf_file=%p",
		__entry->nf_inode, __entry->nf_ref,
		show_nf_flags(__entry->nf_flags),
		__entry->nf_file
	)
);

#define DEFINE_NFSD_FILE_GC_EVENT(name) \
DEFINE_EVENT(nfsd_file_gc_class, name, \
	TP_PROTO( \
		const struct nfsd_file *nf \
	), \
	TP_ARGS(nf))

DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add_disposed);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del_disposed);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_disposed);

DECLARE_EVENT_CLASS(nfsd_file_lruwalk_class,
	TP_PROTO(
		unsigned long removed,
		unsigned long remaining
	),
	TP_ARGS(removed, remaining),
	TP_STRUCT__entry(
		__field(unsigned long, removed)
		__field(unsigned long, remaining)
	),
	TP_fast_assign(
		__entry->removed = removed;
		__entry->remaining = remaining;
	),
	TP_printk("%lu entries removed, %lu remaining",
		__entry->removed, __entry->remaining)
);

#define DEFINE_NFSD_FILE_LRUWALK_EVENT(name) \
DEFINE_EVENT(nfsd_file_lruwalk_class, name, \
	TP_PROTO( \
		unsigned long removed, \
		unsigned long remaining \
	), \
	TP_ARGS(removed, remaining))

DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_gc_removed);
DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_shrinker_removed);

TRACE_EVENT(nfsd_file_close,
	TP_PROTO(
		const struct inode *inode
	),
	TP_ARGS(inode),
	TP_STRUCT__entry(
		__field(const void *, inode)
	),
	TP_fast_assign(
		__entry->inode = inode;
	),
	TP_printk("inode=%p",
		__entry->inode
	)
);
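
/*
 * Duplicate reply cache (DRC) events.  The RC_DROPIT, RC_REPLY, and
 * RC_DOIT values are exported with TRACE_DEFINE_ENUM() so that user
 * space trace tools can decode the symbolic result names.
 */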
#include "cache.h"

TRACE_DEFINE_ENUM(RC_DROPIT);
TRACE_DEFINE_ENUM(RC_REPLY);
TRACE_DEFINE_ENUM(RC_DOIT);

#define show_drc_retval(x) \
	__print_symbolic(x, \
		{ RC_DROPIT, "DROPIT" }, \
		{ RC_REPLY, "REPLY" }, \
		{ RC_DOIT, "DOIT" })

TRACE_EVENT(nfsd_drc_found,
	TP_PROTO(
		const struct nfsd_net *nn,
		const struct svc_rqst *rqstp,
		int result
	),
	TP_ARGS(nn, rqstp, result),
	TP_STRUCT__entry(
		__field(unsigned long long, boot_time)
		__field(unsigned long, result)
		__field(u32, xid)
	),
	TP_fast_assign(
		__entry->boot_time = nn->boot_time;
		__entry->result = result;
		__entry->xid = be32_to_cpu(rqstp->rq_xid);
	),
	TP_printk("boot_time=%16llx xid=0x%08x result=%s",
		__entry->boot_time, __entry->xid,
		show_drc_retval(__entry->result))
);

TRACE_EVENT(nfsd_drc_mismatch,
	TP_PROTO(
		const struct nfsd_net *nn,
		const struct svc_cacherep *key,
		const struct svc_cacherep *rp
	),
	TP_ARGS(nn, key, rp),
	TP_STRUCT__entry(
		__field(unsigned long long, boot_time)
		__field(u32, xid)
		__field(u32, cached)
		__field(u32, ingress)
	),
	TP_fast_assign(
		__entry->boot_time = nn->boot_time;
		__entry->xid = be32_to_cpu(key->c_key.k_xid);
		__entry->cached = (__force u32)key->c_key.k_csum;
		__entry->ingress = (__force u32)rp->c_key.k_csum;
	),
	TP_printk("boot_time=%16llx xid=0x%08x cached-csum=0x%08x ingress-csum=0x%08x",
		__entry->boot_time, __entry->xid, __entry->cached,
		__entry->ingress)
);
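
/*
 * NFSv4 callback channel events: connection parameters, channel state
 * transitions (see show_cb_state), and the completion status of
 * individual callback RPCs.
 */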
TRACE_EVENT(nfsd_cb_args,
	TP_PROTO(
		const struct nfs4_client *clp,
		const struct nfs4_cb_conn *conn
	),
	TP_ARGS(clp, conn),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__field(u32, prog)
		__field(u32, ident)
		__sockaddr(addr, conn->cb_addrlen)
	),
	TP_fast_assign(
		__entry->cl_boot = clp->cl_clientid.cl_boot;
		__entry->cl_id = clp->cl_clientid.cl_id;
		__entry->prog = conn->cb_prog;
		__entry->ident = conn->cb_ident;
		__assign_sockaddr(addr, &conn->cb_addr, conn->cb_addrlen);
	),
	TP_printk("addr=%pISpc client %08x:%08x prog=%u ident=%u",
		__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
		__entry->prog, __entry->ident)
);

TRACE_EVENT(nfsd_cb_nodelegs,
	TP_PROTO(const struct nfs4_client *clp),
	TP_ARGS(clp),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
	),
	TP_fast_assign(
		__entry->cl_boot = clp->cl_clientid.cl_boot;
		__entry->cl_id = clp->cl_clientid.cl_id;
	),
	TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id)
)

#define show_cb_state(val) \
	__print_symbolic(val, \
		{ NFSD4_CB_UP, "UP" }, \
		{ NFSD4_CB_UNKNOWN, "UNKNOWN" }, \
		{ NFSD4_CB_DOWN, "DOWN" }, \
		{ NFSD4_CB_FAULT, "FAULT" })

DECLARE_EVENT_CLASS(nfsd_cb_class,
	TP_PROTO(const struct nfs4_client *clp),
	TP_ARGS(clp),
	TP_STRUCT__entry(
		__field(unsigned long, state)
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
	),
	TP_fast_assign(
		__entry->state = clp->cl_cb_state;
		__entry->cl_boot = clp->cl_clientid.cl_boot;
		__entry->cl_id = clp->cl_clientid.cl_id;
		__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
				  clp->cl_cb_conn.cb_addrlen)
	),
	TP_printk("addr=%pISpc client %08x:%08x state=%s",
		__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
		show_cb_state(__entry->state))
);

#define DEFINE_NFSD_CB_EVENT(name) \
DEFINE_EVENT(nfsd_cb_class, nfsd_cb_##name, \
	TP_PROTO(const struct nfs4_client *clp), \
	TP_ARGS(clp))

DEFINE_NFSD_CB_EVENT(state);
DEFINE_NFSD_CB_EVENT(probe);
DEFINE_NFSD_CB_EVENT(lost);
DEFINE_NFSD_CB_EVENT(shutdown);

TRACE_DEFINE_ENUM(RPC_AUTH_NULL);
TRACE_DEFINE_ENUM(RPC_AUTH_UNIX);
TRACE_DEFINE_ENUM(RPC_AUTH_GSS);
TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5);
TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5I);
TRACE_DEFINE_ENUM(RPC_AUTH_GSS_KRB5P);

#define show_nfsd_authflavor(val) \
	__print_symbolic(val, \
		{ RPC_AUTH_NULL, "none" }, \
		{ RPC_AUTH_UNIX, "sys" }, \
		{ RPC_AUTH_GSS, "gss" }, \
		{ RPC_AUTH_GSS_KRB5, "krb5" }, \
		{ RPC_AUTH_GSS_KRB5I, "krb5i" }, \
		{ RPC_AUTH_GSS_KRB5P, "krb5p" })

TRACE_EVENT(nfsd_cb_setup,
	TP_PROTO(const struct nfs4_client *clp,
		 const char *netid,
		 rpc_authflavor_t authflavor
	),
	TP_ARGS(clp, netid, authflavor),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__field(unsigned long, authflavor)
		__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
		__array(unsigned char, netid, 8)
	),
	TP_fast_assign(
		__entry->cl_boot = clp->cl_clientid.cl_boot;
		__entry->cl_id = clp->cl_clientid.cl_id;
		strlcpy(__entry->netid, netid, sizeof(__entry->netid));
		__entry->authflavor = authflavor;
		__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
				  clp->cl_cb_conn.cb_addrlen)
	),
	TP_printk("addr=%pISpc client %08x:%08x proto=%s flavor=%s",
		__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
		__entry->netid, show_nfsd_authflavor(__entry->authflavor))
);

TRACE_EVENT(nfsd_cb_setup_err,
	TP_PROTO(
		const struct nfs4_client *clp,
		long error
	),
	TP_ARGS(clp, error),
	TP_STRUCT__entry(
		__field(long, error)
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
	),
	TP_fast_assign(
		__entry->error = error;
		__entry->cl_boot = clp->cl_clientid.cl_boot;
		__entry->cl_id = clp->cl_clientid.cl_id;
		__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
				  clp->cl_cb_conn.cb_addrlen)
	),
	TP_printk("addr=%pISpc client %08x:%08x error=%ld",
		__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
		__entry->error)
);
TRACE_EVENT_CONDITION(nfsd_cb_recall,
	TP_PROTO(
		const struct nfs4_stid *stid
	),
	TP_ARGS(stid),
	TP_CONDITION(stid->sc_client),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__field(u32, si_id)
		__field(u32, si_generation)
		__sockaddr(addr, stid->sc_client->cl_cb_conn.cb_addrlen)
	),
	TP_fast_assign(
		const stateid_t *stp = &stid->sc_stateid;
		const struct nfs4_client *clp = stid->sc_client;

		__entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
		__entry->cl_id = stp->si_opaque.so_clid.cl_id;
		__entry->si_id = stp->si_opaque.so_id;
		__entry->si_generation = stp->si_generation;
		__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
				  clp->cl_cb_conn.cb_addrlen)
	),
	TP_printk("addr=%pISpc client %08x:%08x stateid %08x:%08x",
		__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
		__entry->si_id, __entry->si_generation)
);

TRACE_EVENT(nfsd_cb_notify_lock,
	TP_PROTO(
		const struct nfs4_lockowner *lo,
		const struct nfsd4_blocked_lock *nbl
	),
	TP_ARGS(lo, nbl),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__field(u32, fh_hash)
		__sockaddr(addr, lo->lo_owner.so_client->cl_cb_conn.cb_addrlen)
	),
	TP_fast_assign(
		const struct nfs4_client *clp = lo->lo_owner.so_client;

		__entry->cl_boot = clp->cl_clientid.cl_boot;
		__entry->cl_id = clp->cl_clientid.cl_id;
		__entry->fh_hash = knfsd_fh_hash(&nbl->nbl_fh);
		__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
				  clp->cl_cb_conn.cb_addrlen)
	),
	TP_printk("addr=%pISpc client %08x:%08x fh_hash=0x%08x",
		__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
		__entry->fh_hash)
);

TRACE_EVENT(nfsd_cb_offload,
	TP_PROTO(
		const struct nfs4_client *clp,
		const stateid_t *stp,
		const struct knfsd_fh *fh,
		u64 count,
		__be32 status
	),
	TP_ARGS(clp, stp, fh, count, status),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__field(u32, si_id)
		__field(u32, si_generation)
		__field(u32, fh_hash)
		__field(int, status)
		__field(u64, count)
		__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
	),
	TP_fast_assign(
		__entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
		__entry->cl_id = stp->si_opaque.so_clid.cl_id;
		__entry->si_id = stp->si_opaque.so_id;
		__entry->si_generation = stp->si_generation;
		__entry->fh_hash = knfsd_fh_hash(fh);
		__entry->status = be32_to_cpu(status);
		__entry->count = count;
		__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
				  clp->cl_cb_conn.cb_addrlen)
	),
	TP_printk("addr=%pISpc client %08x:%08x stateid %08x:%08x fh_hash=0x%08x count=%llu status=%d",
		__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
		__entry->si_id, __entry->si_generation,
		__entry->fh_hash, __entry->count, __entry->status)
);

DECLARE_EVENT_CLASS(nfsd_cb_done_class,
	TP_PROTO(
		const stateid_t *stp,
		const struct rpc_task *task
	),
	TP_ARGS(stp, task),
	TP_STRUCT__entry(
		__field(u32, cl_boot)
		__field(u32, cl_id)
		__field(u32, si_id)
		__field(u32, si_generation)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
		__entry->cl_id = stp->si_opaque.so_clid.cl_id;
		__entry->si_id = stp->si_opaque.so_id;
		__entry->si_generation = stp->si_generation;
		__entry->status = task->tk_status;
	),
	TP_printk("client %08x:%08x stateid %08x:%08x status=%d",
		__entry->cl_boot, __entry->cl_id, __entry->si_id,
		__entry->si_generation, __entry->status
	)
);

#define DEFINE_NFSD_CB_DONE_EVENT(name) \
DEFINE_EVENT(nfsd_cb_done_class, name, \
	TP_PROTO( \
		const stateid_t *stp, \
		const struct rpc_task *task \
	), \
	TP_ARGS(stp, task))

DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_recall_done);
DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_notify_lock_done);
DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_layout_done);
DEFINE_NFSD_CB_DONE_EVENT(nfsd_cb_offload_done);

#endif /* _NFSD_TRACE_H */
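
/*
 * The boilerplate below tells <trace/define_trace.h> to re-read this
 * header ("trace" in the current directory) during the
 * TRACE_HEADER_MULTI_READ pass guarded above, which is when the
 * tracepoint code is actually generated.
 */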
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>