trace_tid.h 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642
  1. /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
  2. /*
  3. * Copyright(c) 2018 Intel Corporation.
  4. *
  5. */
  6. #if !defined(__HFI1_TRACE_TID_H) || defined(TRACE_HEADER_MULTI_READ)
  7. #define __HFI1_TRACE_TID_H
  8. #include <linux/tracepoint.h>
  9. #include <linux/trace_seq.h>
  10. #include "hfi.h"
  11. #define tidtype_name(type) { PT_##type, #type }
  12. #define show_tidtype(type) \
  13. __print_symbolic(type, \
  14. tidtype_name(EXPECTED), \
  15. tidtype_name(EAGER), \
  16. tidtype_name(INVALID)) \
/* All trace events in this header belong to the "hfi1_tid" system. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_tid
/*
 * Helpers that decode a packed 32-bit RcvArray/TID entry into its
 * control, length, and index fields (implementations live elsewhere).
 */
u8 hfi1_trace_get_tid_ctrl(u32 ent);
u16 hfi1_trace_get_tid_len(u32 ent);
u16 hfi1_trace_get_tid_idx(u32 ent);
/* Shared TP_printk() format strings for the event classes below. */

/* OPFN parameter exchange: negotiated TID RDMA limits for a QP */
#define OPFN_PARAM_PRN "[%s] qpn 0x%x %s OPFN: qp 0x%x, max read %u, " \
		       "max write %u, max length %u, jkey 0x%x timeout %u " \
		       "urg %u"
/* Per-flow TID RDMA state: PSN ranges, TID usage, and transfer progress */
#define TID_FLOW_PRN "[%s] qpn 0x%x flow %d: idx %d resp_ib_psn 0x%x " \
		     "generation 0x%x fpsn 0x%x-%x r_next_psn 0x%x " \
		     "ib_psn 0x%x-%x npagesets %u tnode_cnt %u " \
		     "tidcnt %u tid_idx %u tid_offset %u length %u sent %u"
/* TID group node: group base address, map bits, and usage counts */
#define TID_NODE_PRN "[%s] qpn 0x%x %s idx %u grp base 0x%x map 0x%x " \
		     "used %u cnt %u"
/* Responder-side QP state snapshot */
#define RSP_INFO_PRN "[%s] qpn 0x%x state 0x%x s_state 0x%x psn 0x%x " \
		     "r_psn 0x%x r_state 0x%x r_flags 0x%x " \
		     "r_head_ack_queue %u s_tail_ack_queue %u " \
		     "s_acked_ack_queue %u s_ack_state 0x%x " \
		     "s_nak_state 0x%x s_flags 0x%x ps_flags 0x%x " \
		     "iow_flags 0x%lx"
/* Sender-side QP state snapshot */
#define SENDER_INFO_PRN "[%s] qpn 0x%x state 0x%x s_cur %u s_tail %u " \
			"s_head %u s_acked %u s_last %u s_psn 0x%x " \
			"s_last_psn 0x%x s_flags 0x%x ps_flags 0x%x " \
			"iow_flags 0x%lx s_state 0x%x s_num_rd %u s_retry %u"
/* TID RDMA READ sender-side request/segment progress */
#define TID_READ_SENDER_PRN "[%s] qpn 0x%x newreq %u tid_r_reqs %u " \
			    "tid_r_comp %u pending_tid_r_segs %u " \
			    "s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \
			    "s_state 0x%x hw_flow_index %u generation 0x%x " \
			    "fpsn 0x%x"
/* TID RDMA request: segment accounting and flow/ack ring indices */
#define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 0x%x psn 0x%x lpsn 0x%x " \
		    "cur_seg %u comp_seg %u ack_seg %u alloc_seg %u " \
		    "total_segs %u setup_head %u clear_tail %u flow_idx %u " \
		    "acked_tail %u state %u r_ack_psn 0x%x r_flow_psn 0x%x " \
		    "r_last_ackd 0x%x s_next_psn 0x%x"
  51. #define RCV_ERR_PRN "[%s] qpn 0x%x s_flags 0x%x state 0x%x " \
  52. "s_acked_ack_queue %u s_tail_ack_queue %u " \
  53. "r_head_ack_queue %u opcode 0x%x psn 0x%x r_psn 0x%x " \
  54. " diff %d"
  55. #define TID_WRITE_RSPDR_PRN "[%s] qpn 0x%x r_tid_head %u r_tid_tail %u " \
  56. "r_tid_ack %u r_tid_alloc %u alloc_w_segs %u " \
  57. "pending_tid_w_segs %u sync_pt %s " \
  58. "ps_nak_psn 0x%x ps_nak_state 0x%x " \
  59. "prnr_nak_state 0x%x hw_flow_index %u generation "\
  60. "0x%x fpsn 0x%x resync %s" \
  61. "r_next_psn_kdeth 0x%x"
/* TID RDMA WRITE sender-side request/segment progress */
#define TID_WRITE_SENDER_PRN "[%s] qpn 0x%x newreq %u s_tid_cur %u " \
			     "s_tid_tail %u s_tid_head %u " \
			     "pending_tid_w_resp %u n_requests %u " \
			     "n_tid_requests %u s_flags 0x%x ps_flags 0x%x "\
			     "iow_flags 0x%lx s_state 0x%x s_retry %u"
/* KDETH error flags reported for a TID packet */
#define KDETH_EFLAGS_ERR_PRN "[%s] qpn 0x%x TID ERR: RcvType 0x%x " \
			     "RcvTypeError 0x%x PSN 0x%x"
/*
 * hfi1_exp_tid_reg_unreg - template for expected-TID RcvArray entry
 * (un)registration events.  Records the receive context/subcontext, the
 * RcvArray entry index, the page count, and the buffer's virtual,
 * physical, and DMA addresses.
 */
DECLARE_EVENT_CLASS(/* class */
hfi1_exp_tid_reg_unreg,
TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
	 unsigned long va, unsigned long pa, dma_addr_t dma),
TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
TP_STRUCT__entry(/* entry */
	__field(unsigned int, ctxt)
	__field(u16, subctxt)
	__field(u32, rarr)
	__field(u32, npages)
	__field(unsigned long, va)
	__field(unsigned long, pa)
	__field(dma_addr_t, dma)
),
TP_fast_assign(/* assign */
	__entry->ctxt = ctxt;
	__entry->subctxt = subctxt;
	__entry->rarr = rarr;
	__entry->npages = npages;
	__entry->va = va;
	__entry->pa = pa;
	__entry->dma = dma;
),
/* note argument order: "@ 0x%lx" is pa, "va:0x%lx" is va */
TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
	  __entry->ctxt,
	  __entry->subctxt,
	  __entry->rarr,
	  __entry->npages,
	  __entry->pa,
	  __entry->va,
	  __entry->dma
	  )
);
/* expected-TID entry unregistered */
DEFINE_EVENT(/* exp_tid_unreg */
hfi1_exp_tid_reg_unreg, hfi1_exp_tid_unreg,
TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
	 unsigned long va, unsigned long pa, dma_addr_t dma),
TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)
);
/* expected-TID entry registered */
DEFINE_EVENT(/* exp_tid_reg */
hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg,
TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
	 unsigned long va, unsigned long pa, dma_addr_t dma),
TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)
);
/*
 * hfi1_put_tid - trace programming of a single RcvArray (TID) entry:
 * logs the entry index, the entry type (rendered symbolically via
 * show_tidtype()), the physical address, and the buffer-size order.
 */
TRACE_EVENT(/* put_tid */
hfi1_put_tid,
TP_PROTO(struct hfi1_devdata *dd,
	 u32 index, u32 type, unsigned long pa, u16 order),
TP_ARGS(dd, index, type, pa, order),
TP_STRUCT__entry(/* entry */
	DD_DEV_ENTRY(dd)
	__field(unsigned long, pa)
	__field(u32, index)
	__field(u32, type)
	__field(u16, order)
),
TP_fast_assign(/* assign */
	DD_DEV_ASSIGN(dd);
	__entry->pa = pa;
	__entry->index = index;
	__entry->type = type;
	__entry->order = order;
),
TP_printk("[%s] type %s pa %lx index %u order %u",
	  __get_str(dev),
	  show_tidtype(__entry->type),
	  __entry->pa,
	  __entry->index,
	  __entry->order
	  )
);
/*
 * hfi1_exp_tid_inval - trace invalidation of an expected-TID entry,
 * logging context/subcontext, RcvArray index, page count, and the
 * buffer's virtual and DMA addresses.
 *
 * NOTE(review): __entry->dma (dma_addr_t) is printed with %llx, which
 * assumes a 64-bit dma_addr_t -- confirm for 32-bit configurations.
 */
TRACE_EVENT(/* exp_tid_inval */
hfi1_exp_tid_inval,
TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr,
	 u32 npages, dma_addr_t dma),
TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
TP_STRUCT__entry(/* entry */
	__field(unsigned int, ctxt)
	__field(u16, subctxt)
	__field(unsigned long, va)
	__field(u32, rarr)
	__field(u32, npages)
	__field(dma_addr_t, dma)
),
TP_fast_assign(/* assign */
	__entry->ctxt = ctxt;
	__entry->subctxt = subctxt;
	__entry->va = va;
	__entry->rarr = rarr;
	__entry->npages = npages;
	__entry->dma = dma;
),
TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
	  __entry->ctxt,
	  __entry->subctxt,
	  __entry->rarr,
	  __entry->npages,
	  __entry->va,
	  __entry->dma
	  )
);
/*
 * hfi1_opfn_state_template - snapshot of a QP's OPFN negotiation state:
 * the requested and completed capability masks and the capability
 * currently being negotiated, read from the QP's hfi1 private data.
 */
DECLARE_EVENT_CLASS(/* opfn_state */
hfi1_opfn_state_template,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp),
TP_STRUCT__entry(/* entry */
	DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
	__field(u32, qpn)
	__field(u16, requested)
	__field(u16, completed)
	__field(u8, curr)
),
TP_fast_assign(/* assign */
	struct hfi1_qp_priv *priv = qp->priv;
	DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
	__entry->qpn = qp->ibqp.qp_num;
	__entry->requested = priv->opfn.requested;
	__entry->completed = priv->opfn.completed;
	__entry->curr = priv->opfn.curr;
),
TP_printk(/* print */
	"[%s] qpn 0x%x requested 0x%x completed 0x%x curr 0x%x",
	__get_str(dev),
	__entry->qpn,
	__entry->requested,
	__entry->completed,
	__entry->curr
	)
);
/* OPFN state sampled at each step of connection negotiation */
DEFINE_EVENT(/* conn request */
hfi1_opfn_state_template, hfi1_opfn_state_conn_request,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
DEFINE_EVENT(/* scheduled conn request */
hfi1_opfn_state_template, hfi1_opfn_state_sched_conn_request,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
DEFINE_EVENT(/* conn response */
hfi1_opfn_state_template, hfi1_opfn_state_conn_response,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
DEFINE_EVENT(/* conn reply */
hfi1_opfn_state_template, hfi1_opfn_state_conn_reply,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
DEFINE_EVENT(/* conn error */
hfi1_opfn_state_template, hfi1_opfn_state_conn_error,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
/*
 * hfi1_opfn_data_template - trace an OPFN capability code and its
 * 64-bit payload alongside the QP number and current QP state.
 */
DECLARE_EVENT_CLASS(/* opfn_data */
hfi1_opfn_data_template,
TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
TP_ARGS(qp, capcode, data),
TP_STRUCT__entry(/* entry */
	DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
	__field(u32, qpn)
	__field(u32, state)
	__field(u8, capcode)
	__field(u64, data)
),
TP_fast_assign(/* assign */
	DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
	__entry->qpn = qp->ibqp.qp_num;
	__entry->state = qp->state;
	__entry->capcode = capcode;
	__entry->data = data;
),
TP_printk(/* printk */
	"[%s] qpn 0x%x (state 0x%x) Capcode %u data 0x%llx",
	__get_str(dev),
	__entry->qpn,
	__entry->state,
	__entry->capcode,
	__entry->data
	)
);
/* OPFN payloads at request/response/reply stages */
DEFINE_EVENT(/* conn request */
hfi1_opfn_data_template, hfi1_opfn_data_conn_request,
TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
TP_ARGS(qp, capcode, data)
);
DEFINE_EVENT(/* conn response */
hfi1_opfn_data_template, hfi1_opfn_data_conn_response,
TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
TP_ARGS(qp, capcode, data)
);
DEFINE_EVENT(/* conn reply */
hfi1_opfn_data_template, hfi1_opfn_data_conn_reply,
TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
TP_ARGS(qp, capcode, data)
);
/*
 * hfi1_opfn_param_template - dump a tid_rdma_params block (either the
 * local or the remote side, per the 'remote' flag): QP, max read/write
 * segment counts, max length, jkey, timeout, and urg.
 */
DECLARE_EVENT_CLASS(/* opfn_param */
hfi1_opfn_param_template,
TP_PROTO(struct rvt_qp *qp, char remote,
	 struct tid_rdma_params *param),
TP_ARGS(qp, remote, param),
TP_STRUCT__entry(/* entry */
	DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
	__field(u32, qpn)
	__field(char, remote)
	__field(u32, param_qp)
	__field(u32, max_len)
	__field(u16, jkey)
	__field(u8, max_read)
	__field(u8, max_write)
	__field(u8, timeout)
	__field(u8, urg)
),
TP_fast_assign(/* assign */
	DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
	__entry->qpn = qp->ibqp.qp_num;
	__entry->remote = remote;
	__entry->param_qp = param->qp;
	__entry->max_len = param->max_len;
	__entry->jkey = param->jkey;
	__entry->max_read = param->max_read;
	__entry->max_write = param->max_write;
	__entry->timeout = param->timeout;
	__entry->urg = param->urg;
),
TP_printk(/* print */
	OPFN_PARAM_PRN,
	__get_str(dev),
	__entry->qpn,
	__entry->remote ? "remote" : "local",
	__entry->param_qp,
	__entry->max_read,
	__entry->max_write,
	__entry->max_len,
	__entry->jkey,
	__entry->timeout,
	__entry->urg
	)
);
DEFINE_EVENT(/* event */
hfi1_opfn_param_template, hfi1_opfn_param,
TP_PROTO(struct rvt_qp *qp, char remote,
	 struct tid_rdma_params *param),
TP_ARGS(qp, remote, param)
);
/*
 * hfi1_msg_template - free-form diagnostic message plus one 64-bit
 * value, tagged with the QP number (0 when qp is NULL).
 */
DECLARE_EVENT_CLASS(/* msg */
hfi1_msg_template,
TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
TP_ARGS(qp, msg, more),
TP_STRUCT__entry(/* entry */
	__field(u32, qpn)
	__string(msg, msg)
	__field(u64, more)
),
TP_fast_assign(/* assign */
	/* qp may legitimately be NULL here */
	__entry->qpn = qp ? qp->ibqp.qp_num : 0;
	__assign_str(msg, msg);
	__entry->more = more;
),
TP_printk(/* print */
	"qpn 0x%x %s 0x%llx",
	__entry->qpn,
	__get_str(msg),
	__entry->more
	)
);
/* message events emitted from the various TID RDMA code paths */
DEFINE_EVENT(/* event */
hfi1_msg_template, hfi1_msg_opfn_conn_request,
TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
TP_ARGS(qp, msg, more)
);
DEFINE_EVENT(/* event */
hfi1_msg_template, hfi1_msg_opfn_conn_error,
TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
TP_ARGS(qp, msg, more)
);
DEFINE_EVENT(/* event */
hfi1_msg_template, hfi1_msg_alloc_tids,
TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
TP_ARGS(qp, msg, more)
);
DEFINE_EVENT(/* event */
hfi1_msg_template, hfi1_msg_tid_restart_req,
TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
TP_ARGS(qp, msg, more)
);
DEFINE_EVENT(/* event */
hfi1_msg_template, hfi1_msg_handle_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
TP_ARGS(qp, msg, more)
);
DEFINE_EVENT(/* event */
hfi1_msg_template, hfi1_msg_tid_timeout,
TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
TP_ARGS(qp, msg, more)
);
DEFINE_EVENT(/* event */
hfi1_msg_template, hfi1_msg_tid_retry_timeout,
TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
TP_ARGS(qp, msg, more)
);
/*
 * hfi1_tid_flow_page_template - trace one page of a TID flow: the page
 * struct address derived from vaddr (0 when vaddr is NULL) and the raw
 * virtual address.  The label before the final value is "v0"/"v1" for
 * 8k-MTU entries, otherwise "vaddr".
 *
 * NOTE(review): the 'flow' argument is accepted but not recorded by
 * this template.
 */
DECLARE_EVENT_CLASS(/* tid_flow_page */
hfi1_tid_flow_page_template,
TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
	 char mtu8k, char v1, void *vaddr),
TP_ARGS(qp, flow, index, mtu8k, v1, vaddr),
TP_STRUCT__entry(/* entry */
	DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
	__field(u32, qpn)
	__field(char, mtu8k)
	__field(char, v1)
	__field(u32, index)
	__field(u64, page)
	__field(u64, vaddr)
),
TP_fast_assign(/* assign */
	DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
	__entry->qpn = qp->ibqp.qp_num;
	__entry->mtu8k = mtu8k;
	__entry->v1 = v1;
	__entry->index = index;
	__entry->page = vaddr ? (u64)virt_to_page(vaddr) : 0ULL;
	__entry->vaddr = (u64)vaddr;
),
TP_printk(/* print */
	"[%s] qpn 0x%x page[%u]: page 0x%llx %s 0x%llx",
	__get_str(dev),
	__entry->qpn,
	__entry->index,
	__entry->page,
	__entry->mtu8k ? (__entry->v1 ? "v1" : "v0") : "vaddr",
	__entry->vaddr
	)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_page_template, hfi1_tid_flow_page,
TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
	 char mtu8k, char v1, void *vaddr),
TP_ARGS(qp, flow, index, mtu8k, v1, vaddr)
);
/*
 * hfi1_tid_pageset_template - trace one pageset list entry: its list
 * position ('index') and the entry's starting page index and count.
 */
DECLARE_EVENT_CLASS(/* tid_pageset */
hfi1_tid_pageset_template,
TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count),
TP_ARGS(qp, index, idx, count),
TP_STRUCT__entry(/* entry */
	DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
	__field(u32, qpn)
	__field(u32, index)
	__field(u16, idx)
	__field(u16, count)
),
TP_fast_assign(/* assign */
	DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
	__entry->qpn = qp->ibqp.qp_num;
	__entry->index = index;
	__entry->idx = idx;
	__entry->count = count;
),
TP_printk(/* print */
	"[%s] qpn 0x%x list[%u]: idx %u count %u",
	__get_str(dev),
	__entry->qpn,
	__entry->index,
	__entry->idx,
	__entry->count
	)
);
DEFINE_EVENT(/* event */
hfi1_tid_pageset_template, hfi1_tid_pageset,
TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count),
TP_ARGS(qp, index, idx, count)
);
/*
 * hfi1_tid_flow_template - full dump of one tid_rdma_flow: hardware
 * flow index, response/IB PSN ranges, the generation, the full flow
 * PSN range (spsn/lpsn expanded through full_flow_psn()), and the
 * pageset/TID bookkeeping counters.
 */
DECLARE_EVENT_CLASS(/* tid_flow */
hfi1_tid_flow_template,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow),
TP_STRUCT__entry(/* entry */
	DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
	__field(u32, qpn)
	__field(int, index)
	__field(int, idx)
	__field(u32, resp_ib_psn)
	__field(u32, generation)
	__field(u32, fspsn)
	__field(u32, flpsn)
	__field(u32, r_next_psn)
	__field(u32, ib_spsn)
	__field(u32, ib_lpsn)
	__field(u32, npagesets)
	__field(u32, tnode_cnt)
	__field(u32, tidcnt)
	__field(u32, tid_idx)
	__field(u32, tid_offset)
	__field(u32, length)
	__field(u32, sent)
),
TP_fast_assign(/* assign */
	DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
	__entry->qpn = qp->ibqp.qp_num;
	__entry->index = index;
	__entry->idx = flow->idx;
	__entry->resp_ib_psn = flow->flow_state.resp_ib_psn;
	__entry->generation = flow->flow_state.generation;
	/* expand start/last flow PSNs to full (generation-qualified) PSNs */
	__entry->fspsn = full_flow_psn(flow,
				       flow->flow_state.spsn);
	__entry->flpsn = full_flow_psn(flow,
				       flow->flow_state.lpsn);
	__entry->r_next_psn = flow->flow_state.r_next_psn;
	__entry->ib_spsn = flow->flow_state.ib_spsn;
	__entry->ib_lpsn = flow->flow_state.ib_lpsn;
	__entry->npagesets = flow->npagesets;
	__entry->tnode_cnt = flow->tnode_cnt;
	__entry->tidcnt = flow->tidcnt;
	__entry->tid_idx = flow->tid_idx;
	__entry->tid_offset = flow->tid_offset;
	__entry->length = flow->length;
	__entry->sent = flow->sent;
),
TP_printk(/* print */
	TID_FLOW_PRN,
	__get_str(dev),
	__entry->qpn,
	__entry->index,
	__entry->idx,
	__entry->resp_ib_psn,
	__entry->generation,
	__entry->fspsn,
	__entry->flpsn,
	__entry->r_next_psn,
	__entry->ib_spsn,
	__entry->ib_lpsn,
	__entry->npagesets,
	__entry->tnode_cnt,
	__entry->tidcnt,
	__entry->tid_idx,
	__entry->tid_offset,
	__entry->length,
	__entry->sent
	)
);
/* flow snapshots taken at each stage of the TID RDMA protocol */
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_alloc,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_build_read_pkt,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_build_read_resp,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_req,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_resp,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_restart_req,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_build_write_resp,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_rcv_write_resp,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_build_write_data,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_rcv_tid_ack,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_rcv_resync,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_handle_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_read_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
/*
 * hfi1_tid_node_template - trace a TID group node: caller-supplied tag
 * string, node index, the group's base entry and allocation map, and
 * the used/total entry counts.
 */
DECLARE_EVENT_CLASS(/* tid_node */
hfi1_tid_node_template,
TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
	 u8 map, u8 used, u8 cnt),
TP_ARGS(qp, msg, index, base, map, used, cnt),
TP_STRUCT__entry(/* entry */
	DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
	__field(u32, qpn)
	__string(msg, msg)
	__field(u32, index)
	__field(u32, base)
	__field(u8, map)
	__field(u8, used)
	__field(u8, cnt)
),
TP_fast_assign(/* assign */
	DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
	__entry->qpn = qp->ibqp.qp_num;
	__assign_str(msg, msg);
	__entry->index = index;
	__entry->base = base;
	__entry->map = map;
	__entry->used = used;
	__entry->cnt = cnt;
),
TP_printk(/* print */
	TID_NODE_PRN,
	__get_str(dev),
	__entry->qpn,
	__get_str(msg),
	__entry->index,
	__entry->base,
	__entry->map,
	__entry->used,
	__entry->cnt
	)
);
DEFINE_EVENT(/* event */
hfi1_tid_node_template, hfi1_tid_node_add,
TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
	 u8 map, u8 used, u8 cnt),
TP_ARGS(qp, msg, index, base, map, used, cnt)
);
/*
 * hfi1_tid_entry_template - trace one packed 32-bit TID entry, decoded
 * into ctrl/idx/len via the hfi1_trace_get_tid_*() helpers declared at
 * the top of this header.
 */
DECLARE_EVENT_CLASS(/* tid_entry */
hfi1_tid_entry_template,
TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
TP_ARGS(qp, index, ent),
TP_STRUCT__entry(/* entry */
	DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
	__field(u32, qpn)
	__field(int, index)
	__field(u8, ctrl)
	__field(u16, idx)
	__field(u16, len)
),
TP_fast_assign(/* assign */
	DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
	__entry->qpn = qp->ibqp.qp_num;
	__entry->index = index;
	__entry->ctrl = hfi1_trace_get_tid_ctrl(ent);
	__entry->idx = hfi1_trace_get_tid_idx(ent);
	__entry->len = hfi1_trace_get_tid_len(ent);
),
TP_printk(/* print */
	"[%s] qpn 0x%x TID entry %d: idx %u len %u ctrl 0x%x",
	__get_str(dev),
	__entry->qpn,
	__entry->index,
	__entry->idx,
	__entry->len,
	__entry->ctrl
	)
);
/* TID entry snapshots at allocation and protocol handling points */
DEFINE_EVENT(/* event */
hfi1_tid_entry_template, hfi1_tid_entry_alloc,
TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
TP_ARGS(qp, index, entry)
);
DEFINE_EVENT(/* event */
hfi1_tid_entry_template, hfi1_tid_entry_build_read_resp,
TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
TP_ARGS(qp, index, ent)
);
DEFINE_EVENT(/* event */
hfi1_tid_entry_template, hfi1_tid_entry_rcv_read_req,
TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
TP_ARGS(qp, index, ent)
);
DEFINE_EVENT(/* event */
hfi1_tid_entry_template, hfi1_tid_entry_rcv_write_resp,
TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
TP_ARGS(qp, index, entry)
);
DEFINE_EVENT(/* event */
hfi1_tid_entry_template, hfi1_tid_entry_build_write_data,
TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
TP_ARGS(qp, index, entry)
);
/*
 * hfi1_responder_info_template - responder-side QP state snapshot at a
 * given PSN: QP and private (ps_/iow_) flag words, ack-queue indices,
 * and send/receive state machines.
 *
 * NOTE(review): the r_nak_state field is declared in TP_STRUCT__entry
 * but is never assigned in TP_fast_assign nor printed by RSP_INFO_PRN;
 * it appears to be dead -- confirm before removing.
 */
DECLARE_EVENT_CLASS(/* rsp_info */
hfi1_responder_info_template,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn),
TP_STRUCT__entry(/* entry */
	DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
	__field(u32, qpn)
	__field(u8, state)
	__field(u8, s_state)
	__field(u32, psn)
	__field(u32, r_psn)
	__field(u8, r_state)
	__field(u8, r_flags)
	__field(u8, r_head_ack_queue)
	__field(u8, s_tail_ack_queue)
	__field(u8, s_acked_ack_queue)
	__field(u8, s_ack_state)
	__field(u8, s_nak_state)
	__field(u8, r_nak_state)
	__field(u32, s_flags)
	__field(u32, ps_flags)
	__field(unsigned long, iow_flags)
),
TP_fast_assign(/* assign */
	struct hfi1_qp_priv *priv = qp->priv;
	DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
	__entry->qpn = qp->ibqp.qp_num;
	__entry->state = qp->state;
	__entry->s_state = qp->s_state;
	__entry->psn = psn;
	__entry->r_psn = qp->r_psn;
	__entry->r_state = qp->r_state;
	__entry->r_flags = qp->r_flags;
	__entry->r_head_ack_queue = qp->r_head_ack_queue;
	__entry->s_tail_ack_queue = qp->s_tail_ack_queue;
	__entry->s_acked_ack_queue = qp->s_acked_ack_queue;
	__entry->s_ack_state = qp->s_ack_state;
	__entry->s_nak_state = qp->s_nak_state;
	__entry->s_flags = qp->s_flags;
	__entry->ps_flags = priv->s_flags;
	__entry->iow_flags = priv->s_iowait.flags;
),
TP_printk(/* print */
	RSP_INFO_PRN,
	__get_str(dev),
	__entry->qpn,
	__entry->state,
	__entry->s_state,
	__entry->psn,
	__entry->r_psn,
	__entry->r_state,
	__entry->r_flags,
	__entry->r_head_ack_queue,
	__entry->s_tail_ack_queue,
	__entry->s_acked_ack_queue,
	__entry->s_ack_state,
	__entry->s_nak_state,
	__entry->s_flags,
	__entry->ps_flags,
	__entry->iow_flags
	)
);
/* responder snapshots taken at each TID RDMA handling point */
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_make_rc_ack,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_rcv_tid_read_req,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_tid_rcv_error,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_tid_write_alloc_res,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_req,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_build_tid_write_resp,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_data,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_make_tid_ack,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_handle_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_read_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
/*
 * hfi1_sender_info_template - snapshot of the requester (sender) side of
 * an RC QP: the send-queue ring indices (s_cur/s_tail/s_head/s_acked/
 * s_last), PSN progress (s_psn/s_last_psn), the QP and per-priv flag
 * words, the iowait flags, and the send state machine state.  The priv
 * pointer is cast to struct hfi1_qp_priv inline since this template has
 * no local variable block.  Values are a best-effort snapshot taken at
 * trace time.
 */
  785. DECLARE_EVENT_CLASS(/* sender_info */
  786. hfi1_sender_info_template,
  787. TP_PROTO(struct rvt_qp *qp),
  788. TP_ARGS(qp),
  789. TP_STRUCT__entry(/* entry */
  790. DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
  791. __field(u32, qpn)
  792. __field(u8, state)
  793. __field(u32, s_cur)
  794. __field(u32, s_tail)
  795. __field(u32, s_head)
  796. __field(u32, s_acked)
  797. __field(u32, s_last)
  798. __field(u32, s_psn)
  799. __field(u32, s_last_psn)
  800. __field(u32, s_flags)
  801. __field(u32, ps_flags)
  802. __field(unsigned long, iow_flags)
  803. __field(u8, s_state)
  804. __field(u8, s_num_rd)
  805. __field(u8, s_retry)
  806. ),
  807. TP_fast_assign(/* assign */
  808. DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
  809. __entry->qpn = qp->ibqp.qp_num;
  810. __entry->state = qp->state;
  811. __entry->s_cur = qp->s_cur;
  812. __entry->s_tail = qp->s_tail;
  813. __entry->s_head = qp->s_head;
  814. __entry->s_acked = qp->s_acked;
  815. __entry->s_last = qp->s_last;
  816. __entry->s_psn = qp->s_psn;
  817. __entry->s_last_psn = qp->s_last_psn;
  818. __entry->s_flags = qp->s_flags;
  819. __entry->ps_flags = ((struct hfi1_qp_priv *)qp->priv)->s_flags;
  820. __entry->iow_flags =
  821. ((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
  822. __entry->s_state = qp->s_state;
  823. __entry->s_num_rd = qp->s_num_rd_atomic;
  824. __entry->s_retry = qp->s_retry;
  825. ),
  826. TP_printk(/* print */
  827. SENDER_INFO_PRN,
  828. __get_str(dev),
  829. __entry->qpn,
  830. __entry->state,
  831. __entry->s_cur,
  832. __entry->s_tail,
  833. __entry->s_head,
  834. __entry->s_acked,
  835. __entry->s_last,
  836. __entry->s_psn,
  837. __entry->s_last_psn,
  838. __entry->s_flags,
  839. __entry->ps_flags,
  840. __entry->iow_flags,
  841. __entry->s_state,
  842. __entry->s_num_rd,
  843. __entry->s_retry
  844. )
  845. );
/*
 * Sender-side trace hooks.  Each event reuses hfi1_sender_info_template
 * (defined above) and records the requester QP state at a named point in
 * the RC send / retransmit / ACK-processing paths.
 */
  846. DEFINE_EVENT(/* event */
  847. hfi1_sender_info_template, hfi1_sender_make_rc_req,
  848. TP_PROTO(struct rvt_qp *qp),
  849. TP_ARGS(qp)
  850. );
  851. DEFINE_EVENT(/* event */
  852. hfi1_sender_info_template, hfi1_sender_reset_psn,
  853. TP_PROTO(struct rvt_qp *qp),
  854. TP_ARGS(qp)
  855. );
  856. DEFINE_EVENT(/* event */
  857. hfi1_sender_info_template, hfi1_sender_restart_rc,
  858. TP_PROTO(struct rvt_qp *qp),
  859. TP_ARGS(qp)
  860. );
  861. DEFINE_EVENT(/* event */
  862. hfi1_sender_info_template, hfi1_sender_do_rc_ack,
  863. TP_PROTO(struct rvt_qp *qp),
  864. TP_ARGS(qp)
  865. );
  866. DEFINE_EVENT(/* event */
  867. hfi1_sender_info_template, hfi1_sender_rcv_tid_read_resp,
  868. TP_PROTO(struct rvt_qp *qp),
  869. TP_ARGS(qp)
  870. );
  871. DEFINE_EVENT(/* event */
  872. hfi1_sender_info_template, hfi1_sender_rcv_tid_ack,
  873. TP_PROTO(struct rvt_qp *qp),
  874. TP_ARGS(qp)
  875. );
  876. DEFINE_EVENT(/* event */
  877. hfi1_sender_info_template, hfi1_sender_make_tid_pkt,
  878. TP_PROTO(struct rvt_qp *qp),
  879. TP_ARGS(qp)
  880. );
  881. DEFINE_EVENT(/* event */
  882. hfi1_sender_info_template, hfi1_sender_read_kdeth_eflags,
  883. TP_PROTO(struct rvt_qp *qp),
  884. TP_ARGS(qp)
  885. );
/*
 * hfi1_tid_read_sender_template - requester-side TID RDMA READ state:
 * outstanding/completed read request counters, pending segment count,
 * flag words, the priv send state, and the hardware flow state
 * (flow index / generation / first PSN).  'newreq' is a flag from the
 * caller indicating whether a new request is being started.
 * Instantiated by the two DEFINE_EVENTs that follow.
 */
  886. DECLARE_EVENT_CLASS(/* tid_read_sender */
  887. hfi1_tid_read_sender_template,
  888. TP_PROTO(struct rvt_qp *qp, char newreq),
  889. TP_ARGS(qp, newreq),
  890. TP_STRUCT__entry(/* entry */
  891. DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
  892. __field(u32, qpn)
  893. __field(char, newreq)
  894. __field(u32, tid_r_reqs)
  895. __field(u32, tid_r_comp)
  896. __field(u32, pending_tid_r_segs)
  897. __field(u32, s_flags)
  898. __field(u32, ps_flags)
  899. __field(unsigned long, iow_flags)
  900. __field(u8, s_state)
  901. __field(u32, hw_flow_index)
  902. __field(u32, generation)
  903. __field(u32, fpsn)
  904. ),
  905. TP_fast_assign(/* assign */
  906. struct hfi1_qp_priv *priv = qp->priv;
  907. DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
  908. __entry->qpn = qp->ibqp.qp_num;
  909. __entry->newreq = newreq;
  910. __entry->tid_r_reqs = priv->tid_r_reqs;
  911. __entry->tid_r_comp = priv->tid_r_comp;
  912. __entry->pending_tid_r_segs = priv->pending_tid_r_segs;
  913. __entry->s_flags = qp->s_flags;
  914. __entry->ps_flags = priv->s_flags;
  915. __entry->iow_flags = priv->s_iowait.flags;
  916. __entry->s_state = priv->s_state;
  917. __entry->hw_flow_index = priv->flow_state.index;
  918. __entry->generation = priv->flow_state.generation;
  919. __entry->fpsn = priv->flow_state.psn;
  920. ),
  921. TP_printk(/* print */
  922. TID_READ_SENDER_PRN,
  923. __get_str(dev),
  924. __entry->qpn,
  925. __entry->newreq,
  926. __entry->tid_r_reqs,
  927. __entry->tid_r_comp,
  928. __entry->pending_tid_r_segs,
  929. __entry->s_flags,
  930. __entry->ps_flags,
  931. __entry->iow_flags,
  932. __entry->s_state,
  933. __entry->hw_flow_index,
  934. __entry->generation,
  935. __entry->fpsn
  936. )
  937. );
  938. DEFINE_EVENT(/* event */
  939. hfi1_tid_read_sender_template, hfi1_tid_read_sender_make_req,
  940. TP_PROTO(struct rvt_qp *qp, char newreq),
  941. TP_ARGS(qp, newreq)
  942. );
  943. DEFINE_EVENT(/* event */
  944. hfi1_tid_read_sender_template, hfi1_tid_read_sender_kdeth_eflags,
  945. TP_PROTO(struct rvt_qp *qp, char newreq),
  946. TP_ARGS(qp, newreq)
  947. );
/*
 * hfi1_tid_rdma_request_template - per-request TID RDMA progress dump:
 * segment counters (cur/comp/ack/alloc/total), flow-ring indices
 * (setup_head/clear_tail/flow_idx/acked_tail), request state, and the
 * ACK/flow/next PSN bookkeeping from struct tid_rdma_request.  The
 * caller supplies opcode plus the psn/lpsn range of the WQE so the same
 * template serves both the read and write request paths.
 */
  948. DECLARE_EVENT_CLASS(/* tid_rdma_request */
  949. hfi1_tid_rdma_request_template,
  950. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  951. struct tid_rdma_request *req),
  952. TP_ARGS(qp, newreq, opcode, psn, lpsn, req),
  953. TP_STRUCT__entry(/* entry */
  954. DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
  955. __field(u32, qpn)
  956. __field(char, newreq)
  957. __field(u8, opcode)
  958. __field(u32, psn)
  959. __field(u32, lpsn)
  960. __field(u32, cur_seg)
  961. __field(u32, comp_seg)
  962. __field(u32, ack_seg)
  963. __field(u32, alloc_seg)
  964. __field(u32, total_segs)
  965. __field(u16, setup_head)
  966. __field(u16, clear_tail)
  967. __field(u16, flow_idx)
  968. __field(u16, acked_tail)
  969. __field(u32, state)
  970. __field(u32, r_ack_psn)
  971. __field(u32, r_flow_psn)
  972. __field(u32, r_last_acked)
  973. __field(u32, s_next_psn)
  974. ),
  975. TP_fast_assign(/* assign */
  976. DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
  977. __entry->qpn = qp->ibqp.qp_num;
  978. __entry->newreq = newreq;
  979. __entry->opcode = opcode;
  980. __entry->psn = psn;
  981. __entry->lpsn = lpsn;
  982. __entry->cur_seg = req->cur_seg;
  983. __entry->comp_seg = req->comp_seg;
  984. __entry->ack_seg = req->ack_seg;
  985. __entry->alloc_seg = req->alloc_seg;
  986. __entry->total_segs = req->total_segs;
  987. __entry->setup_head = req->setup_head;
  988. __entry->clear_tail = req->clear_tail;
  989. __entry->flow_idx = req->flow_idx;
  990. __entry->acked_tail = req->acked_tail;
  991. __entry->state = req->state;
  992. __entry->r_ack_psn = req->r_ack_psn;
  993. __entry->r_flow_psn = req->r_flow_psn;
  994. __entry->r_last_acked = req->r_last_acked;
  995. __entry->s_next_psn = req->s_next_psn;
  996. ),
  997. TP_printk(/* print */
  998. TID_REQ_PRN,
  999. __get_str(dev),
  1000. __entry->qpn,
  1001. __entry->newreq,
  1002. __entry->opcode,
  1003. __entry->psn,
  1004. __entry->lpsn,
  1005. __entry->cur_seg,
  1006. __entry->comp_seg,
  1007. __entry->ack_seg,
  1008. __entry->alloc_seg,
  1009. __entry->total_segs,
  1010. __entry->setup_head,
  1011. __entry->clear_tail,
  1012. __entry->flow_idx,
  1013. __entry->acked_tail,
  1014. __entry->state,
  1015. __entry->r_ack_psn,
  1016. __entry->r_flow_psn,
  1017. __entry->r_last_acked,
  1018. __entry->s_next_psn
  1019. )
  1020. );
/*
 * TID RDMA request trace hooks.  All events below reuse
 * hfi1_tid_rdma_request_template; the event name encodes the call site
 * (make_req_read, rcv_write_resp, tid_retry_timeout, ...), covering the
 * read path, the write path, error/retry handling, resync, and the
 * KDETH eflags error path.
 */
  1021. DEFINE_EVENT(/* event */
  1022. hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_read,
  1023. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1024. struct tid_rdma_request *req),
  1025. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1026. );
  1027. DEFINE_EVENT(/* event */
  1028. hfi1_tid_rdma_request_template, hfi1_tid_req_build_read_req,
  1029. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1030. struct tid_rdma_request *req),
  1031. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1032. );
  1033. DEFINE_EVENT(/* event */
  1034. hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_req,
  1035. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1036. struct tid_rdma_request *req),
  1037. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1038. );
  1039. DEFINE_EVENT(/* event */
  1040. hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_resp,
  1041. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1042. struct tid_rdma_request *req),
  1043. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1044. );
  1045. DEFINE_EVENT(/* event */
  1046. hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_err,
  1047. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1048. struct tid_rdma_request *req),
  1049. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1050. );
  1051. DEFINE_EVENT(/* event */
  1052. hfi1_tid_rdma_request_template, hfi1_tid_req_restart_req,
  1053. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1054. struct tid_rdma_request *req),
  1055. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1056. );
  1057. DEFINE_EVENT(/* event */
  1058. hfi1_tid_rdma_request_template, hfi1_tid_req_setup_tid_wqe,
  1059. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1060. struct tid_rdma_request *req),
  1061. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1062. );
  1063. DEFINE_EVENT(/* event */
  1064. hfi1_tid_rdma_request_template, hfi1_tid_req_write_alloc_res,
  1065. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1066. struct tid_rdma_request *req),
  1067. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1068. );
  1069. DEFINE_EVENT(/* event */
  1070. hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_req,
  1071. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1072. struct tid_rdma_request *req),
  1073. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1074. );
  1075. DEFINE_EVENT(/* event */
  1076. hfi1_tid_rdma_request_template, hfi1_tid_req_build_write_resp,
  1077. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1078. struct tid_rdma_request *req),
  1079. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1080. );
  1081. DEFINE_EVENT(/* event */
  1082. hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_resp,
  1083. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1084. struct tid_rdma_request *req),
  1085. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1086. );
  1087. DEFINE_EVENT(/* event */
  1088. hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_data,
  1089. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1090. struct tid_rdma_request *req),
  1091. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1092. );
  1093. DEFINE_EVENT(/* event */
  1094. hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_tid_ack,
  1095. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1096. struct tid_rdma_request *req),
  1097. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1098. );
  1099. DEFINE_EVENT(/* event */
  1100. hfi1_tid_rdma_request_template, hfi1_tid_req_tid_retry_timeout,
  1101. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1102. struct tid_rdma_request *req),
  1103. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1104. );
  1105. DEFINE_EVENT(/* event */
  1106. hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_resync,
  1107. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1108. struct tid_rdma_request *req),
  1109. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1110. );
  1111. DEFINE_EVENT(/* event */
  1112. hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_pkt,
  1113. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1114. struct tid_rdma_request *req),
  1115. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1116. );
  1117. DEFINE_EVENT(/* event */
  1118. hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_ack,
  1119. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1120. struct tid_rdma_request *req),
  1121. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1122. );
  1123. DEFINE_EVENT(/* event */
  1124. hfi1_tid_rdma_request_template, hfi1_tid_req_handle_kdeth_eflags,
  1125. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1126. struct tid_rdma_request *req),
  1127. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1128. );
  1129. DEFINE_EVENT(/* event */
  1130. hfi1_tid_rdma_request_template, hfi1_tid_req_read_kdeth_eflags,
  1131. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1132. struct tid_rdma_request *req),
  1133. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1134. );
  1135. DEFINE_EVENT(/* event */
  1136. hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write,
  1137. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1138. struct tid_rdma_request *req),
  1139. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1140. );
  1141. DEFINE_EVENT(/* event */
  1142. hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_write,
  1143. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1144. struct tid_rdma_request *req),
  1145. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1146. );
  1147. DEFINE_EVENT(/* event */
  1148. hfi1_tid_rdma_request_template, hfi1_tid_req_update_num_rd_atomic,
  1149. TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
  1150. struct tid_rdma_request *req),
  1151. TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
  1152. );
/*
 * hfi1_rc_rcv_err_template - fired when an incoming RC packet is flagged
 * as an error/duplicate: captures the ack-queue indices, the offending
 * opcode and PSN, the expected r_psn, and 'diff' (the caller-computed
 * PSN delta used to classify the error).  Instantiated once below for
 * the TID RDMA receive-error path.
 */
  1153. DECLARE_EVENT_CLASS(/* rc_rcv_err */
  1154. hfi1_rc_rcv_err_template,
  1155. TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
  1156. TP_ARGS(qp, opcode, psn, diff),
  1157. TP_STRUCT__entry(/* entry */
  1158. DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
  1159. __field(u32, qpn)
  1160. __field(u32, s_flags)
  1161. __field(u8, state)
  1162. __field(u8, s_acked_ack_queue)
  1163. __field(u8, s_tail_ack_queue)
  1164. __field(u8, r_head_ack_queue)
  1165. __field(u32, opcode)
  1166. __field(u32, psn)
  1167. __field(u32, r_psn)
  1168. __field(int, diff)
  1169. ),
  1170. TP_fast_assign(/* assign */
  1171. DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
  1172. __entry->qpn = qp->ibqp.qp_num;
  1173. __entry->s_flags = qp->s_flags;
  1174. __entry->state = qp->state;
  1175. __entry->s_acked_ack_queue = qp->s_acked_ack_queue;
  1176. __entry->s_tail_ack_queue = qp->s_tail_ack_queue;
  1177. __entry->r_head_ack_queue = qp->r_head_ack_queue;
  1178. __entry->opcode = opcode;
  1179. __entry->psn = psn;
  1180. __entry->r_psn = qp->r_psn;
  1181. __entry->diff = diff;
  1182. ),
  1183. TP_printk(/* print */
  1184. RCV_ERR_PRN,
  1185. __get_str(dev),
  1186. __entry->qpn,
  1187. __entry->s_flags,
  1188. __entry->state,
  1189. __entry->s_acked_ack_queue,
  1190. __entry->s_tail_ack_queue,
  1191. __entry->r_head_ack_queue,
  1192. __entry->opcode,
  1193. __entry->psn,
  1194. __entry->r_psn,
  1195. __entry->diff
  1196. )
  1197. );
  1198. DEFINE_EVENT(/* event */
  1199. hfi1_rc_rcv_err_template, hfi1_tid_rdma_rcv_err,
  1200. TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
  1201. TP_ARGS(qp, opcode, psn, diff)
  1202. );
/*
 * hfi1_sge_template - dumps one scatter/gather element (its index within
 * the WQE, virtual address, and length).  The vaddr pointer is widened
 * to u64 for a stable trace record layout.  Used by the SGE alignment
 * check below.
 */
  1203. DECLARE_EVENT_CLASS(/* sge */
  1204. hfi1_sge_template,
  1205. TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
  1206. TP_ARGS(qp, index, sge),
  1207. TP_STRUCT__entry(/* entry */
  1208. DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
  1209. __field(u32, qpn)
  1210. __field(int, index)
  1211. __field(u64, vaddr)
  1212. __field(u32, sge_length)
  1213. ),
  1214. TP_fast_assign(/* assign */
  1215. DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
  1216. __entry->qpn = qp->ibqp.qp_num;
  1217. __entry->index = index;
  1218. __entry->vaddr = (u64)sge->vaddr;
  1219. __entry->sge_length = sge->sge_length;
  1220. ),
  1221. TP_printk(/* print */
  1222. "[%s] qpn 0x%x sge %d: vaddr 0x%llx sge_length %u",
  1223. __get_str(dev),
  1224. __entry->qpn,
  1225. __entry->index,
  1226. __entry->vaddr,
  1227. __entry->sge_length
  1228. )
  1229. );
  1230. DEFINE_EVENT(/* event */
  1231. hfi1_sge_template, hfi1_sge_check_align,
  1232. TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
  1233. TP_ARGS(qp, index, sge)
  1234. );
/*
 * hfi1_tid_write_rsp_template - responder-side TID RDMA WRITE state:
 * the r_tid_* request ring indices, allocated/pending write-segment
 * counts, NAK/RNR-NAK bookkeeping, the hardware flow state, and the
 * sync/resync flags tracked in struct hfi1_qp_priv.  The two bool
 * fields are rendered as "yes"/"no" in TP_printk.
 */
  1235. DECLARE_EVENT_CLASS(/* tid_write_sp */
  1236. hfi1_tid_write_rsp_template,
  1237. TP_PROTO(struct rvt_qp *qp),
  1238. TP_ARGS(qp),
  1239. TP_STRUCT__entry(/* entry */
  1240. DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
  1241. __field(u32, qpn)
  1242. __field(u32, r_tid_head)
  1243. __field(u32, r_tid_tail)
  1244. __field(u32, r_tid_ack)
  1245. __field(u32, r_tid_alloc)
  1246. __field(u32, alloc_w_segs)
  1247. __field(u32, pending_tid_w_segs)
  1248. __field(bool, sync_pt)
  1249. __field(u32, ps_nak_psn)
  1250. __field(u8, ps_nak_state)
  1251. __field(u8, prnr_nak_state)
  1252. __field(u32, hw_flow_index)
  1253. __field(u32, generation)
  1254. __field(u32, fpsn)
  1255. __field(bool, resync)
  1256. __field(u32, r_next_psn_kdeth)
  1257. ),
  1258. TP_fast_assign(/* assign */
  1259. struct hfi1_qp_priv *priv = qp->priv;
  1260. DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
  1261. __entry->qpn = qp->ibqp.qp_num;
  1262. __entry->r_tid_head = priv->r_tid_head;
  1263. __entry->r_tid_tail = priv->r_tid_tail;
  1264. __entry->r_tid_ack = priv->r_tid_ack;
  1265. __entry->r_tid_alloc = priv->r_tid_alloc;
  1266. __entry->alloc_w_segs = priv->alloc_w_segs;
  1267. __entry->pending_tid_w_segs = priv->pending_tid_w_segs;
  1268. __entry->sync_pt = priv->sync_pt;
  1269. __entry->ps_nak_psn = priv->s_nak_psn;
  1270. __entry->ps_nak_state = priv->s_nak_state;
  1271. __entry->prnr_nak_state = priv->rnr_nak_state;
  1272. __entry->hw_flow_index = priv->flow_state.index;
  1273. __entry->generation = priv->flow_state.generation;
  1274. __entry->fpsn = priv->flow_state.psn;
  1275. __entry->resync = priv->resync;
  1276. __entry->r_next_psn_kdeth = priv->r_next_psn_kdeth;
  1277. ),
  1278. TP_printk(/* print */
  1279. TID_WRITE_RSPDR_PRN,
  1280. __get_str(dev),
  1281. __entry->qpn,
  1282. __entry->r_tid_head,
  1283. __entry->r_tid_tail,
  1284. __entry->r_tid_ack,
  1285. __entry->r_tid_alloc,
  1286. __entry->alloc_w_segs,
  1287. __entry->pending_tid_w_segs,
  1288. __entry->sync_pt ? "yes" : "no",
  1289. __entry->ps_nak_psn,
  1290. __entry->ps_nak_state,
  1291. __entry->prnr_nak_state,
  1292. __entry->hw_flow_index,
  1293. __entry->generation,
  1294. __entry->fpsn,
  1295. __entry->resync ? "yes" : "no",
  1296. __entry->r_next_psn_kdeth
  1297. )
  1298. );
/*
 * TID RDMA WRITE responder trace hooks, all instantiating
 * hfi1_tid_write_rsp_template at the named points of the write
 * resource-allocation, request/data receive, resync, and ACK paths.
 */
  1299. DEFINE_EVENT(/* event */
  1300. hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_alloc_res,
  1301. TP_PROTO(struct rvt_qp *qp),
  1302. TP_ARGS(qp)
  1303. );
  1304. DEFINE_EVENT(/* event */
  1305. hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_req,
  1306. TP_PROTO(struct rvt_qp *qp),
  1307. TP_ARGS(qp)
  1308. );
  1309. DEFINE_EVENT(/* event */
  1310. hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_build_resp,
  1311. TP_PROTO(struct rvt_qp *qp),
  1312. TP_ARGS(qp)
  1313. );
  1314. DEFINE_EVENT(/* event */
  1315. hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_data,
  1316. TP_PROTO(struct rvt_qp *qp),
  1317. TP_ARGS(qp)
  1318. );
  1319. DEFINE_EVENT(/* event */
  1320. hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_resync,
  1321. TP_PROTO(struct rvt_qp *qp),
  1322. TP_ARGS(qp)
  1323. );
  1324. DEFINE_EVENT(/* event */
  1325. hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_tid_ack,
  1326. TP_PROTO(struct rvt_qp *qp),
  1327. TP_ARGS(qp)
  1328. );
  1329. DEFINE_EVENT(/* event */
  1330. hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_handle_kdeth_eflags,
  1331. TP_PROTO(struct rvt_qp *qp),
  1332. TP_ARGS(qp)
  1333. );
  1334. DEFINE_EVENT(/* event */
  1335. hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_rc_ack,
  1336. TP_PROTO(struct rvt_qp *qp),
  1337. TP_ARGS(qp)
  1338. );
/*
 * hfi1_tid_write_sender_template - requester-side TID RDMA WRITE state:
 * the s_tid_* send ring indices, outstanding response count, the
 * atomic request counters (read via atomic_read), flag words, and the
 * priv send state/retry count.  'newreq' flags whether a new request
 * is being started at the call site.
 */
  1339. DECLARE_EVENT_CLASS(/* tid_write_sender */
  1340. hfi1_tid_write_sender_template,
  1341. TP_PROTO(struct rvt_qp *qp, char newreq),
  1342. TP_ARGS(qp, newreq),
  1343. TP_STRUCT__entry(/* entry */
  1344. DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
  1345. __field(u32, qpn)
  1346. __field(char, newreq)
  1347. __field(u32, s_tid_cur)
  1348. __field(u32, s_tid_tail)
  1349. __field(u32, s_tid_head)
  1350. __field(u32, pending_tid_w_resp)
  1351. __field(u32, n_requests)
  1352. __field(u32, n_tid_requests)
  1353. __field(u32, s_flags)
  1354. __field(u32, ps_flags)
  1355. __field(unsigned long, iow_flags)
  1356. __field(u8, s_state)
  1357. __field(u8, s_retry)
  1358. ),
  1359. TP_fast_assign(/* assign */
  1360. struct hfi1_qp_priv *priv = qp->priv;
  1361. DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
  1362. __entry->qpn = qp->ibqp.qp_num;
  1363. __entry->newreq = newreq;
  1364. __entry->s_tid_cur = priv->s_tid_cur;
  1365. __entry->s_tid_tail = priv->s_tid_tail;
  1366. __entry->s_tid_head = priv->s_tid_head;
  1367. __entry->pending_tid_w_resp = priv->pending_tid_w_resp;
  1368. __entry->n_requests = atomic_read(&priv->n_requests);
  1369. __entry->n_tid_requests = atomic_read(&priv->n_tid_requests);
  1370. __entry->s_flags = qp->s_flags;
  1371. __entry->ps_flags = priv->s_flags;
  1372. __entry->iow_flags = priv->s_iowait.flags;
  1373. __entry->s_state = priv->s_state;
  1374. __entry->s_retry = priv->s_retry;
  1375. ),
  1376. TP_printk(/* print */
  1377. TID_WRITE_SENDER_PRN,
  1378. __get_str(dev),
  1379. __entry->qpn,
  1380. __entry->newreq,
  1381. __entry->s_tid_cur,
  1382. __entry->s_tid_tail,
  1383. __entry->s_tid_head,
  1384. __entry->pending_tid_w_resp,
  1385. __entry->n_requests,
  1386. __entry->n_tid_requests,
  1387. __entry->s_flags,
  1388. __entry->ps_flags,
  1389. __entry->iow_flags,
  1390. __entry->s_state,
  1391. __entry->s_retry
  1392. )
  1393. );
/*
 * TID RDMA WRITE requester trace hooks, all instantiating
 * hfi1_tid_write_sender_template at the named points of the write
 * send, ACK-processing, retry-timeout, and restart paths.
 */
  1394. DEFINE_EVENT(/* event */
  1395. hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_resp,
  1396. TP_PROTO(struct rvt_qp *qp, char newreq),
  1397. TP_ARGS(qp, newreq)
  1398. );
  1399. DEFINE_EVENT(/* event */
  1400. hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_tid_ack,
  1401. TP_PROTO(struct rvt_qp *qp, char newreq),
  1402. TP_ARGS(qp, newreq)
  1403. );
  1404. DEFINE_EVENT(/* event */
  1405. hfi1_tid_write_sender_template, hfi1_tid_write_sender_retry_timeout,
  1406. TP_PROTO(struct rvt_qp *qp, char newreq),
  1407. TP_ARGS(qp, newreq)
  1408. );
  1409. DEFINE_EVENT(/* event */
  1410. hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_tid_pkt,
  1411. TP_PROTO(struct rvt_qp *qp, char newreq),
  1412. TP_ARGS(qp, newreq)
  1413. );
  1414. DEFINE_EVENT(/* event */
  1415. hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_req,
  1416. TP_PROTO(struct rvt_qp *qp, char newreq),
  1417. TP_ARGS(qp, newreq)
  1418. );
  1419. DEFINE_EVENT(/* event */
  1420. hfi1_tid_write_sender_template, hfi1_tid_write_sender_restart_rc,
  1421. TP_PROTO(struct rvt_qp *qp, char newreq),
  1422. TP_ARGS(qp, newreq)
  1423. );
/*
 * hfi1_tid_ack_template - records the contents of a received TID ACK:
 * the AETH word, the ACK's PSN, the request PSN it acknowledges, and
 * the resync PSN.  Instantiated once below for the rcv_tid_ack path.
 */
  1424. DECLARE_EVENT_CLASS(/* tid_ack */
  1425. hfi1_tid_ack_template,
  1426. TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
  1427. u32 req_psn, u32 resync_psn),
  1428. TP_ARGS(qp, aeth, psn, req_psn, resync_psn),
  1429. TP_STRUCT__entry(/* entry */
  1430. DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
  1431. __field(u32, qpn)
  1432. __field(u32, aeth)
  1433. __field(u32, psn)
  1434. __field(u32, req_psn)
  1435. __field(u32, resync_psn)
  1436. ),
  1437. TP_fast_assign(/* assign */
  1438. DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
  1439. __entry->qpn = qp->ibqp.qp_num;
  1440. __entry->aeth = aeth;
  1441. __entry->psn = psn;
  1442. __entry->req_psn = req_psn;
  1443. __entry->resync_psn = resync_psn;
  1444. ),
  1445. TP_printk(/* print */
  1446. "[%s] qpn 0x%x aeth 0x%x psn 0x%x req_psn 0x%x resync_psn 0x%x",
  1447. __get_str(dev),
  1448. __entry->qpn,
  1449. __entry->aeth,
  1450. __entry->psn,
  1451. __entry->req_psn,
  1452. __entry->resync_psn
  1453. )
  1454. );
  1455. DEFINE_EVENT(/* rcv_tid_ack */
  1456. hfi1_tid_ack_template, hfi1_rcv_tid_ack,
  1457. TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
  1458. u32 req_psn, u32 resync_psn),
  1459. TP_ARGS(qp, aeth, psn, req_psn, resync_psn)
  1460. );
/*
 * hfi1_kdeth_eflags_error_template - logs a KDETH error-flags condition
 * on a received packet: the receive type, the receive-type-error (rte)
 * code, and the packet PSN.  Instantiated once below for the TID RDMA
 * WRITE eflags error path.
 */
  1461. DECLARE_EVENT_CLASS(/* kdeth_eflags_error */
  1462. hfi1_kdeth_eflags_error_template,
  1463. TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
  1464. TP_ARGS(qp, rcv_type, rte, psn),
  1465. TP_STRUCT__entry(/* entry */
  1466. DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
  1467. __field(u32, qpn)
  1468. __field(u8, rcv_type)
  1469. __field(u8, rte)
  1470. __field(u32, psn)
  1471. ),
  1472. TP_fast_assign(/* assign */
  1473. DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
  1474. __entry->qpn = qp->ibqp.qp_num;
  1475. __entry->rcv_type = rcv_type;
  1476. __entry->rte = rte;
  1477. __entry->psn = psn;
  1478. ),
  1479. TP_printk(/* print */
  1480. KDETH_EFLAGS_ERR_PRN,
  1481. __get_str(dev),
  1482. __entry->qpn,
  1483. __entry->rcv_type,
  1484. __entry->rte,
  1485. __entry->psn
  1486. )
  1487. );
  1488. DEFINE_EVENT(/* event */
  1489. hfi1_kdeth_eflags_error_template, hfi1_eflags_err_write,
  1490. TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
  1491. TP_ARGS(qp, rcv_type, rte, psn)
  1492. );
  1493. #endif /* __HFI1_TRACE_TID_H */
/*
 * Standard tracepoint-header footer: point the trace framework at this
 * file and expand all of the above TRACE_EVENT/DEFINE_EVENT definitions
 * via <trace/define_trace.h>, which re-includes this header multiple
 * times (hence the include-guard dance above).
 */
  1494. #undef TRACE_INCLUDE_PATH
  1495. #undef TRACE_INCLUDE_FILE
  1496. #define TRACE_INCLUDE_PATH .
  1497. #define TRACE_INCLUDE_FILE trace_tid
  1498. #include <trace/define_trace.h>