
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 */
#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_TX_H

#include <linux/tracepoint.h>
#include <linux/trace_seq.h>

#include "hfi.h"
#include "mad.h"
#include "sdma.h"
#include "ipoib.h"
#include "user_sdma.h"

const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);

#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)

#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_tx
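
/*
 * Every TRACE_EVENT()/DEFINE_EVENT() below expands to a trace_<name>()
 * hook, e.g. trace_hfi1_piofree(sc, extra), which the driver calls from
 * its send-path .c files (the call sites are not in this header).  The
 * two PIO events that follow snapshot the send context indices;
 * hfi1_wantpiointr additionally records the credit_ctrl value in effect
 * when a credit-return interrupt is being requested.
 */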
TRACE_EVENT(hfi1_piofree,
    TP_PROTO(struct send_context *sc, int extra),
    TP_ARGS(sc, extra),
    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
        __field(u32, sw_index)
        __field(u32, hw_context)
        __field(int, extra)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
        __entry->sw_index = sc->sw_index;
        __entry->hw_context = sc->hw_context;
        __entry->extra = extra;
    ),
    TP_printk("[%s] ctxt %u(%u) extra %d",
        __get_str(dev),
        __entry->sw_index,
        __entry->hw_context,
        __entry->extra
    )
);
TRACE_EVENT(hfi1_wantpiointr,
    TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
    TP_ARGS(sc, needint, credit_ctrl),
    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
        __field(u32, sw_index)
        __field(u32, hw_context)
        __field(u32, needint)
        __field(u64, credit_ctrl)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
        __entry->sw_index = sc->sw_index;
        __entry->hw_context = sc->hw_context;
        __entry->needint = needint;
        __entry->credit_ctrl = credit_ctrl;
    ),
    TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
        __get_str(dev),
        __entry->sw_index,
        __entry->hw_context,
        __entry->needint,
        (unsigned long long)__entry->credit_ctrl
    )
);
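
/*
 * Event class shared by hfi1_qpwakeup and hfi1_qpsleep: snapshots the QP
 * number plus three flag words (rvt_qp s_flags, the hfi1 private s_flags,
 * and the iowait flags) so sleep/wakeup transitions can be correlated in
 * a trace.
 */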
DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
    TP_PROTO(struct rvt_qp *qp, u32 flags),
    TP_ARGS(qp, flags),
    TP_STRUCT__entry(
        DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
        __field(u32, qpn)
        __field(u32, flags)
        __field(u32, s_flags)
        __field(u32, ps_flags)
        __field(unsigned long, iow_flags)
    ),
    TP_fast_assign(
        DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
        __entry->flags = flags;
        __entry->qpn = qp->ibqp.qp_num;
        __entry->s_flags = qp->s_flags;
        __entry->ps_flags =
            ((struct hfi1_qp_priv *)qp->priv)->s_flags;
        __entry->iow_flags =
            ((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
    ),
    TP_printk(
        "[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
        __get_str(dev),
        __entry->qpn,
        __entry->flags,
        __entry->s_flags,
        __entry->ps_flags,
        __entry->iow_flags
    )
);

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
    TP_PROTO(struct rvt_qp *qp, u32 flags),
    TP_ARGS(qp, flags));

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
    TP_PROTO(struct rvt_qp *qp, u32 flags),
    TP_ARGS(qp, flags));
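
/*
 * Logs one hardware SDMA descriptor.  The raw desc0/desc1 quadwords are
 * stored as-is; the physical address, generation and byte count are
 * unpacked in TP_printk() with the SDMA_DESC* shift/mask constants, so
 * the decode cost is paid when the trace buffer is read rather than in
 * the hot path.
 */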
TRACE_EVENT(hfi1_sdma_descriptor,
    TP_PROTO(struct sdma_engine *sde,
             u64 desc0,
             u64 desc1,
             u16 e,
             void *descp),
    TP_ARGS(sde, desc0, desc1, e, descp),
    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
        __field(void *, descp)
        __field(u64, desc0)
        __field(u64, desc1)
        __field(u16, e)
        __field(u8, idx)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
        __entry->desc0 = desc0;
        __entry->desc1 = desc1;
        __entry->idx = sde->this_idx;
        __entry->descp = descp;
        __entry->e = e;
    ),
    TP_printk(
        "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
        __get_str(dev),
        __entry->idx,
        __parse_sdma_flags(__entry->desc0, __entry->desc1),
        (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
            SDMA_DESC0_PHY_ADDR_MASK,
        (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
            SDMA_DESC1_GENERATION_MASK),
        (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
            SDMA_DESC0_BYTE_COUNT_MASK),
        __entry->desc0,
        __entry->desc1,
        __entry->descp,
        __entry->e
    )
);

TRACE_EVENT(hfi1_sdma_engine_select,
    TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
    TP_ARGS(dd, sel, vl, idx),
    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
        __field(u32, sel)
        __field(u8, vl)
        __field(u8, idx)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(dd);
        __entry->sel = sel;
        __entry->vl = vl;
        __entry->idx = idx;
    ),
    TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
        __get_str(dev),
        __entry->idx,
        __entry->sel,
        __entry->vl
    )
);

TRACE_EVENT(hfi1_sdma_user_free_queues,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
    TP_ARGS(dd, ctxt, subctxt),
    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
        __field(u16, ctxt)
        __field(u16, subctxt)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(dd);
        __entry->ctxt = ctxt;
        __entry->subctxt = subctxt;
    ),
    TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
        __get_str(dev),
        __entry->ctxt,
        __entry->subctxt
    )
);

TRACE_EVENT(hfi1_sdma_user_process_request,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
             u16 comp_idx),
    TP_ARGS(dd, ctxt, subctxt, comp_idx),
    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
        __field(u16, ctxt)
        __field(u16, subctxt)
        __field(u16, comp_idx)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(dd);
        __entry->ctxt = ctxt;
        __entry->subctxt = subctxt;
        __entry->comp_idx = comp_idx;
    ),
    TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
        __get_str(dev),
        __entry->ctxt,
        __entry->subctxt,
        __entry->comp_idx
    )
);
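
/*
 * Generic [ctxt:subctxt:comp_idx] -> u32 value template, reused below for
 * the initial TID offset, the user-supplied data length, and the computed
 * length of a user SDMA request.
 */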
DECLARE_EVENT_CLASS(
    hfi1_sdma_value_template,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
             u32 value),
    TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
        __field(u16, ctxt)
        __field(u16, subctxt)
        __field(u16, comp_idx)
        __field(u32, value)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(dd);
        __entry->ctxt = ctxt;
        __entry->subctxt = subctxt;
        __entry->comp_idx = comp_idx;
        __entry->value = value;
    ),
    TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
        __get_str(dev),
        __entry->ctxt,
        __entry->subctxt,
        __entry->comp_idx,
        __entry->value
    )
);

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
             u16 comp_idx, u32 tidoffset),
    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
             u16 comp_idx, u32 data_len),
    TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
             u16 comp_idx, u32 data_len),
    TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

TRACE_EVENT(hfi1_sdma_user_tid_info,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
             u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
        __field(u16, ctxt)
        __field(u16, subctxt)
        __field(u16, comp_idx)
        __field(u32, tidoffset)
        __field(u32, units)
        __field(u8, shift)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(dd);
        __entry->ctxt = ctxt;
        __entry->subctxt = subctxt;
        __entry->comp_idx = comp_idx;
        __entry->tidoffset = tidoffset;
        __entry->units = units;
        __entry->shift = shift;
    ),
    TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
        __get_str(dev),
        __entry->ctxt,
        __entry->subctxt,
        __entry->comp_idx,
        __entry->tidoffset,
        __entry->units,
        __entry->shift
    )
);

TRACE_EVENT(hfi1_sdma_request,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
             unsigned long dim),
    TP_ARGS(dd, ctxt, subctxt, dim),
    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
        __field(u16, ctxt)
        __field(u16, subctxt)
        __field(unsigned long, dim)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(dd);
        __entry->ctxt = ctxt;
        __entry->subctxt = subctxt;
        __entry->dim = dim;
    ),
    TP_printk("[%s] SDMA from %u:%u (%lu)",
        __get_str(dev),
        __entry->ctxt,
        __entry->subctxt,
        __entry->dim
    )
);

DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
    TP_PROTO(struct sdma_engine *sde, u64 status),
    TP_ARGS(sde, status),
    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
        __field(u64, status)
        __field(u8, idx)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
        __entry->status = status;
        __entry->idx = sde->this_idx;
    ),
    TP_printk("[%s] SDE(%u) status %llx",
        __get_str(dev),
        __entry->idx,
        (unsigned long long)__entry->status
    )
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
    TP_PROTO(struct sdma_engine *sde, u64 status),
    TP_ARGS(sde, status)
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
    TP_PROTO(struct sdma_engine *sde, u64 status),
    TP_ARGS(sde, status)
);

DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
    TP_PROTO(struct sdma_engine *sde, int aidx),
    TP_ARGS(sde, aidx),
    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
        __field(int, aidx)
        __field(u8, idx)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
        __entry->idx = sde->this_idx;
        __entry->aidx = aidx;
    ),
    TP_printk("[%s] SDE(%u) aidx %d",
        __get_str(dev),
        __entry->idx,
        __entry->aidx
    )
);

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
    TP_PROTO(struct sdma_engine *sde, int aidx),
    TP_ARGS(sde, aidx));

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
    TP_PROTO(struct sdma_engine *sde, int aidx),
    TP_ARGS(sde, aidx));
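
/*
 * hfi1_sdma_progress has two definitions: with CONFIG_HFI1_DEBUG_SDMA_ORDER
 * the txreq sequence number (sn) is recorded as well; otherwise the event
 * is identical minus that field.  txp may be NULL, in which case txnext
 * (and sn, when present) are recorded as ~0.
 */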
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
    TP_PROTO(struct sdma_engine *sde,
             u16 hwhead,
             u16 swhead,
             struct sdma_txreq *txp
    ),
    TP_ARGS(sde, hwhead, swhead, txp),
    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
        __field(u64, sn)
        __field(u16, hwhead)
        __field(u16, swhead)
        __field(u16, txnext)
        __field(u16, tx_tail)
        __field(u16, tx_head)
        __field(u8, idx)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
        __entry->hwhead = hwhead;
        __entry->swhead = swhead;
        __entry->tx_tail = sde->tx_tail;
        __entry->tx_head = sde->tx_head;
        __entry->txnext = txp ? txp->next_descq_idx : ~0;
        __entry->idx = sde->this_idx;
        __entry->sn = txp ? txp->sn : ~0;
    ),
    TP_printk(
        "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
        __get_str(dev),
        __entry->idx,
        __entry->sn,
        __entry->hwhead,
        __entry->swhead,
        __entry->txnext,
        __entry->tx_head,
        __entry->tx_tail
    )
);
#else
TRACE_EVENT(hfi1_sdma_progress,
    TP_PROTO(struct sdma_engine *sde,
             u16 hwhead, u16 swhead,
             struct sdma_txreq *txp
    ),
    TP_ARGS(sde, hwhead, swhead, txp),
    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
        __field(u16, hwhead)
        __field(u16, swhead)
        __field(u16, txnext)
        __field(u16, tx_tail)
        __field(u16, tx_head)
        __field(u8, idx)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
        __entry->hwhead = hwhead;
        __entry->swhead = swhead;
        __entry->tx_tail = sde->tx_tail;
        __entry->tx_head = sde->tx_head;
        __entry->txnext = txp ? txp->next_descq_idx : ~0;
        __entry->idx = sde->this_idx;
    ),
    TP_printk(
        "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
        __get_str(dev),
        __entry->idx,
        __entry->hwhead,
        __entry->swhead,
        __entry->txnext,
        __entry->tx_head,
        __entry->tx_tail
    )
);
#endif
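
/* Sequence-number class backing hfi1_sdma_out_sn / hfi1_sdma_in_sn. */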
DECLARE_EVENT_CLASS(hfi1_sdma_sn,
    TP_PROTO(struct sdma_engine *sde, u64 sn),
    TP_ARGS(sde, sn),
    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
        __field(u64, sn)
        __field(u8, idx)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
        __entry->sn = sn;
        __entry->idx = sde->this_idx;
    ),
    TP_printk("[%s] SDE(%u) sn %llu",
        __get_str(dev),
        __entry->idx,
        __entry->sn
    )
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
    TP_PROTO(
        struct sdma_engine *sde,
        u64 sn
    ),
    TP_ARGS(sde, sn)
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
    TP_PROTO(struct sdma_engine *sde, u64 sn),
    TP_ARGS(sde, sn)
);
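
/*
 * Full user SDMA header dump: PBC, LRH, BTH and all nine KDETH words,
 * byte-swapped to CPU order at trace time (PBC/KDETH are little-endian,
 * LRH/BTH big-endian on the wire).  Note that TP_printk() passes pbc1
 * before pbc0.
 */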
#define USDMA_HDR_FORMAT \
    "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"

TRACE_EVENT(hfi1_sdma_user_header,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
             struct hfi1_pkt_header *hdr, u32 tidval),
    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
    TP_STRUCT__entry(
        DD_DEV_ENTRY(dd)
        __field(u16, ctxt)
        __field(u8, subctxt)
        __field(u16, req)
        __field(u32, pbc0)
        __field(u32, pbc1)
        __field(u32, lrh0)
        __field(u32, lrh1)
        __field(u32, bth0)
        __field(u32, bth1)
        __field(u32, bth2)
        __field(u32, kdeth0)
        __field(u32, kdeth1)
        __field(u32, kdeth2)
        __field(u32, kdeth3)
        __field(u32, kdeth4)
        __field(u32, kdeth5)
        __field(u32, kdeth6)
        __field(u32, kdeth7)
        __field(u32, kdeth8)
        __field(u32, tidval)
    ),
    TP_fast_assign(
        __le32 *pbc = (__le32 *)hdr->pbc;
        __be32 *lrh = (__be32 *)hdr->lrh;
        __be32 *bth = (__be32 *)hdr->bth;
        __le32 *kdeth = (__le32 *)&hdr->kdeth;
        DD_DEV_ASSIGN(dd);
        __entry->ctxt = ctxt;
        __entry->subctxt = subctxt;
        __entry->req = req;
        __entry->pbc0 = le32_to_cpu(pbc[0]);
        __entry->pbc1 = le32_to_cpu(pbc[1]);
        __entry->lrh0 = be32_to_cpu(lrh[0]);
        __entry->lrh1 = be32_to_cpu(lrh[1]);
        __entry->bth0 = be32_to_cpu(bth[0]);
        __entry->bth1 = be32_to_cpu(bth[1]);
        __entry->bth2 = be32_to_cpu(bth[2]);
        __entry->kdeth0 = le32_to_cpu(kdeth[0]);
        __entry->kdeth1 = le32_to_cpu(kdeth[1]);
        __entry->kdeth2 = le32_to_cpu(kdeth[2]);
        __entry->kdeth3 = le32_to_cpu(kdeth[3]);
        __entry->kdeth4 = le32_to_cpu(kdeth[4]);
        __entry->kdeth5 = le32_to_cpu(kdeth[5]);
        __entry->kdeth6 = le32_to_cpu(kdeth[6]);
        __entry->kdeth7 = le32_to_cpu(kdeth[7]);
        __entry->kdeth8 = le32_to_cpu(kdeth[8]);
        __entry->tidval = tidval;
    ),
    TP_printk(USDMA_HDR_FORMAT,
        __get_str(dev),
        __entry->ctxt,
        __entry->subctxt,
        __entry->req,
        __entry->pbc1,
        __entry->pbc0,
        __entry->lrh0,
        __entry->lrh1,
        __entry->bth0,
        __entry->bth1,
        __entry->bth2,
        __entry->kdeth0,
        __entry->kdeth1,
        __entry->kdeth2,
        __entry->kdeth3,
        __entry->kdeth4,
        __entry->kdeth5,
        __entry->kdeth6,
        __entry->kdeth7,
        __entry->kdeth8,
        __entry->tidval
    )
);

#define SDMA_UREQ_FMT \
    "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"

TRACE_EVENT(hfi1_sdma_user_reqinfo,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
    TP_ARGS(dd, ctxt, subctxt, i),
    TP_STRUCT__entry(
        DD_DEV_ENTRY(dd)
        __field(u16, ctxt)
        __field(u8, subctxt)
        __field(u8, ver_opcode)
        __field(u8, iovcnt)
        __field(u16, npkts)
        __field(u16, fragsize)
        __field(u16, comp_idx)
    ),
    TP_fast_assign(
        DD_DEV_ASSIGN(dd);
        __entry->ctxt = ctxt;
        __entry->subctxt = subctxt;
        __entry->ver_opcode = i[0] & 0xff;
        __entry->iovcnt = (i[0] >> 8) & 0xff;
        __entry->npkts = i[1];
        __entry->fragsize = i[2];
        __entry->comp_idx = i[3];
    ),
    TP_printk(SDMA_UREQ_FMT,
        __get_str(dev),
        __entry->ctxt,
        __entry->subctxt,
        __entry->ver_opcode,
        __entry->iovcnt,
        __entry->npkts,
        __entry->fragsize,
        __entry->comp_idx
    )
);
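
/*
 * __print_symbolic() maps the numeric completion state to its name when
 * the trace buffer is read; FREE/QUEUED/COMPLETE/ERROR are the user SDMA
 * completion states from the hfi1 uapi enum.
 */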
#define usdma_complete_name(st) { st, #st }
#define show_usdma_complete_state(st) \
    __print_symbolic(st, \
        usdma_complete_name(FREE), \
        usdma_complete_name(QUEUED), \
        usdma_complete_name(COMPLETE), \
        usdma_complete_name(ERROR))

TRACE_EVENT(hfi1_sdma_user_completion,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
             u8 state, int code),
    TP_ARGS(dd, ctxt, subctxt, idx, state, code),
    TP_STRUCT__entry(
        DD_DEV_ENTRY(dd)
        __field(u16, ctxt)
        __field(u8, subctxt)
        __field(u16, idx)
        __field(u8, state)
        __field(int, code)
    ),
    TP_fast_assign(
        DD_DEV_ASSIGN(dd);
        __entry->ctxt = ctxt;
        __entry->subctxt = subctxt;
        __entry->idx = idx;
        __entry->state = state;
        __entry->code = code;
    ),
    TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
        __get_str(dev), __entry->ctxt, __entry->subctxt,
        __entry->idx, show_usdma_complete_state(__entry->state),
        __entry->code)
);

TRACE_EVENT(hfi1_usdma_defer,
    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
             struct sdma_engine *sde,
             struct iowait *wait),
    TP_ARGS(pq, sde, wait),
    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
        __field(struct hfi1_user_sdma_pkt_q *, pq)
        __field(struct sdma_engine *, sde)
        __field(struct iowait *, wait)
        __field(int, engine)
        __field(int, empty)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
        __entry->pq = pq;
        __entry->sde = sde;
        __entry->wait = wait;
        __entry->engine = sde->this_idx;
        __entry->empty = list_empty(&__entry->wait->list);
    ),
    TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
        __get_str(dev),
        (unsigned long long)__entry->pq,
        (unsigned long long)__entry->sde,
        (unsigned long long)__entry->wait,
        __entry->engine,
        __entry->empty
    )
);

TRACE_EVENT(hfi1_usdma_activate,
    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
             struct iowait *wait,
             int reason),
    TP_ARGS(pq, wait, reason),
    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
        __field(struct hfi1_user_sdma_pkt_q *, pq)
        __field(struct iowait *, wait)
        __field(int, reason)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
        __entry->pq = pq;
        __entry->wait = wait;
        __entry->reason = reason;
    ),
    TP_printk("[%s] pq %llx wait %llx reason %d",
        __get_str(dev),
        (unsigned long long)__entry->pq,
        (unsigned long long)__entry->wait,
        __entry->reason
    )
);

TRACE_EVENT(hfi1_usdma_we,
    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
             int we_ret),
    TP_ARGS(pq, we_ret),
    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
        __field(struct hfi1_user_sdma_pkt_q *, pq)
        __field(int, state)
        __field(int, we_ret)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
        __entry->pq = pq;
        __entry->state = pq->state;
        __entry->we_ret = we_ret;
    ),
    TP_printk("[%s] pq %llx state %d we_ret %d",
        __get_str(dev),
        (unsigned long long)__entry->pq,
        __entry->state,
        __entry->we_ret
    )
);
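
/*
 * print_u32_array() (defined out of line, alongside parse_sdma_flags())
 * renders the AHG words as hex for the event below; only the first 'len'
 * of the 10 stashed u32s are copied and printed.
 */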
const char *print_u32_array(struct trace_seq *, u32 *, int);
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)

TRACE_EVENT(hfi1_sdma_user_header_ahg,
    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
             u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
    TP_STRUCT__entry(
        DD_DEV_ENTRY(dd)
        __field(u16, ctxt)
        __field(u8, subctxt)
        __field(u16, req)
        __field(u8, sde)
        __field(u8, idx)
        __field(int, len)
        __field(u32, tidval)
        __array(u32, ahg, 10)
    ),
    TP_fast_assign(
        DD_DEV_ASSIGN(dd);
        __entry->ctxt = ctxt;
        __entry->subctxt = subctxt;
        __entry->req = req;
        __entry->sde = sde;
        __entry->idx = ahgidx;
        __entry->len = len;
        __entry->tidval = tidval;
        memcpy(__entry->ahg, ahg, len * sizeof(u32));
    ),
    TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
        __get_str(dev),
        __entry->ctxt,
        __entry->subctxt,
        __entry->req,
        __entry->sde,
        __entry->idx,
        __entry->len - 1,
        __print_u32_hex(__entry->ahg, __entry->len),
        __entry->tidval
    )
);

TRACE_EVENT(hfi1_sdma_state,
    TP_PROTO(struct sdma_engine *sde,
             const char *cstate,
             const char *nstate
    ),
    TP_ARGS(sde, cstate, nstate),
    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
        __string(curstate, cstate)
        __string(newstate, nstate)
    ),
    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
        __assign_str(curstate, cstate);
        __assign_str(newstate, nstate);
    ),
    TP_printk("[%s] current state %s new state %s",
        __get_str(dev),
        __get_str(curstate),
        __get_str(newstate)
    )
);
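
/*
 * The BCT() accessor pulls a big-endian field back out of the
 * buffer_control snapshot stored in the event's dynamic array, so the
 * byte swap happens when the trace is read, not when it is recorded.
 */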
#define BCT_FORMAT \
    "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"

#define BCT(field) \
    be16_to_cpu( \
        ((struct buffer_control *)__get_dynamic_array(bct))->field \
    )

DECLARE_EVENT_CLASS(hfi1_bct_template,
    TP_PROTO(struct hfi1_devdata *dd,
             struct buffer_control *bc),
    TP_ARGS(dd, bc),
    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
        __dynamic_array(u8, bct, sizeof(*bc))
    ),
    TP_fast_assign(DD_DEV_ASSIGN(dd);
        memcpy(__get_dynamic_array(bct), bc,
               sizeof(*bc));
    ),
    TP_printk(BCT_FORMAT,
        BCT(overall_shared_limit),
        BCT(vl[0].dedicated),
        BCT(vl[0].shared),
        BCT(vl[1].dedicated),
        BCT(vl[1].shared),
        BCT(vl[2].dedicated),
        BCT(vl[2].shared),
        BCT(vl[3].dedicated),
        BCT(vl[3].shared),
        BCT(vl[4].dedicated),
        BCT(vl[4].shared),
        BCT(vl[5].dedicated),
        BCT(vl[5].shared),
        BCT(vl[6].dedicated),
        BCT(vl[6].shared),
        BCT(vl[7].dedicated),
        BCT(vl[7].shared),
        BCT(vl[15].dedicated),
        BCT(vl[15].shared)
    )
);

DEFINE_EVENT(hfi1_bct_template, bct_set,
    TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
    TP_ARGS(dd, bc));

DEFINE_EVENT(hfi1_bct_template, bct_get,
    TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
    TP_ARGS(dd, bc));
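
/*
 * Per-WQE send completion: captures the posted work request's id, ssn,
 * length, opcode and send flags alongside the QP number and type.
 */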
TRACE_EVENT(
    hfi1_qp_send_completion,
    TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
    TP_ARGS(qp, wqe, idx),
    TP_STRUCT__entry(
        DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
        __field(struct rvt_swqe *, wqe)
        __field(u64, wr_id)
        __field(u32, qpn)
        __field(u32, qpt)
        __field(u32, length)
        __field(u32, idx)
        __field(u32, ssn)
        __field(enum ib_wr_opcode, opcode)
        __field(int, send_flags)
    ),
    TP_fast_assign(
        DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
        __entry->wqe = wqe;
        __entry->wr_id = wqe->wr.wr_id;
        __entry->qpn = qp->ibqp.qp_num;
        __entry->qpt = qp->ibqp.qp_type;
        __entry->length = wqe->length;
        __entry->idx = idx;
        __entry->ssn = wqe->ssn;
        __entry->opcode = wqe->wr.opcode;
        __entry->send_flags = wqe->wr.send_flags;
    ),
    TP_printk(
        "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
        __get_str(dev),
        __entry->qpn,
        __entry->qpt,
        __entry->wqe,
        __entry->idx,
        __entry->wr_id,
        __entry->length,
        __entry->ssn,
        __entry->opcode,
        __entry->send_flags
    )
);

DECLARE_EVENT_CLASS(
    hfi1_do_send_template,
    TP_PROTO(struct rvt_qp *qp, bool flag),
    TP_ARGS(qp, flag),
    TP_STRUCT__entry(
        DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
        __field(u32, qpn)
        __field(bool, flag)
    ),
    TP_fast_assign(
        DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
        __entry->qpn = qp->ibqp.qp_num;
        __entry->flag = flag;
    ),
    TP_printk(
        "[%s] qpn %x flag %d",
        __get_str(dev),
        __entry->qpn,
        __entry->flag
    )
);

DEFINE_EVENT(
    hfi1_do_send_template, hfi1_rc_do_send,
    TP_PROTO(struct rvt_qp *qp, bool flag),
    TP_ARGS(qp, flag)
);

DEFINE_EVENT(/* event */
    hfi1_do_send_template, hfi1_rc_do_tid_send,
    TP_PROTO(struct rvt_qp *qp, bool flag),
    TP_ARGS(qp, flag)
);

DEFINE_EVENT(
    hfi1_do_send_template, hfi1_rc_expired_time_slice,
    TP_PROTO(struct rvt_qp *qp, bool flag),
    TP_ARGS(qp, flag)
);
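
/*
 * IPoIB (AIP) tx queue class: snapshots ring head/tail, outstanding
 * txreqs (sent minus complete), the flow word, the stop/no_desc counters,
 * and whether the netdev subqueue is currently stopped.  All the queue
 * lifecycle events below (stop/wake/flush/switch/...) reuse it.
 */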
DECLARE_EVENT_CLASS(/* AIP */
    hfi1_ipoib_txq_template,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq),
    TP_STRUCT__entry(/* entry */
        DD_DEV_ENTRY(txq->priv->dd)
        __field(struct hfi1_ipoib_txq *, txq)
        __field(struct sdma_engine *, sde)
        __field(ulong, head)
        __field(ulong, tail)
        __field(uint, used)
        __field(uint, flow)
        __field(int, stops)
        __field(int, no_desc)
        __field(u8, idx)
        __field(u8, stopped)
    ),
    TP_fast_assign(/* assign */
        DD_DEV_ASSIGN(txq->priv->dd);
        __entry->txq = txq;
        __entry->sde = txq->sde;
        __entry->head = txq->tx_ring.head;
        __entry->tail = txq->tx_ring.tail;
        __entry->idx = txq->q_idx;
        __entry->used =
            txq->tx_ring.sent_txreqs -
            txq->tx_ring.complete_txreqs;
        __entry->flow = txq->flow.as_int;
        __entry->stops = atomic_read(&txq->tx_ring.stops);
        __entry->no_desc = atomic_read(&txq->tx_ring.no_desc);
        __entry->stopped =
            __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
    ),
    TP_printk(/* print */
        "[%s] txq %llx idx %u sde %llx:%u cpu %d head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
        __get_str(dev),
        (unsigned long long)__entry->txq,
        __entry->idx,
        (unsigned long long)__entry->sde,
        __entry->sde ? __entry->sde->this_idx : 0,
        __entry->sde ? __entry->sde->cpu : 0,
        __entry->head,
        __entry->tail,
        __entry->flow,
        __entry->used,
        __entry->stops,
        __entry->no_desc,
        __entry->stopped
    )
);

DEFINE_EVENT(/* queue stop */
    hfi1_ipoib_txq_template, hfi1_txq_stop,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

DEFINE_EVENT(/* queue wake */
    hfi1_ipoib_txq_template, hfi1_txq_wake,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

DEFINE_EVENT(/* flow flush */
    hfi1_ipoib_txq_template, hfi1_flow_flush,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

DEFINE_EVENT(/* flow switch */
    hfi1_ipoib_txq_template, hfi1_flow_switch,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

DEFINE_EVENT(/* wakeup */
    hfi1_ipoib_txq_template, hfi1_txq_wakeup,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

DEFINE_EVENT(/* full */
    hfi1_ipoib_txq_template, hfi1_txq_full,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

DEFINE_EVENT(/* queued */
    hfi1_ipoib_txq_template, hfi1_txq_queued,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

DEFINE_EVENT(/* xmit_stopped */
    hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

DEFINE_EVENT(/* xmit_unstopped */
    hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);
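
/*
 * Per-packet variant of the above: ties an ipoib_txreq and its skb to a
 * ring index for the produce/consume events.
 */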
DECLARE_EVENT_CLASS(/* AIP */
    hfi1_ipoib_tx_template,
    TP_PROTO(struct ipoib_txreq *tx, u32 idx),
    TP_ARGS(tx, idx),
    TP_STRUCT__entry(/* entry */
        DD_DEV_ENTRY(tx->txq->priv->dd)
        __field(struct ipoib_txreq *, tx)
        __field(struct hfi1_ipoib_txq *, txq)
        __field(struct sk_buff *, skb)
        __field(ulong, idx)
    ),
    TP_fast_assign(/* assign */
        DD_DEV_ASSIGN(tx->txq->priv->dd);
        __entry->tx = tx;
        __entry->skb = tx->skb;
        __entry->txq = tx->txq;
        __entry->idx = idx;
    ),
    TP_printk(/* print */
        "[%s] tx %llx txq %llx,%u skb %llx idx %lu",
        __get_str(dev),
        (unsigned long long)__entry->tx,
        (unsigned long long)__entry->txq,
        __entry->txq ? __entry->txq->q_idx : 0,
        (unsigned long long)__entry->skb,
        __entry->idx
    )
);

DEFINE_EVENT(/* produce */
    hfi1_ipoib_tx_template, hfi1_tx_produce,
    TP_PROTO(struct ipoib_txreq *tx, u32 idx),
    TP_ARGS(tx, idx)
);

DEFINE_EVENT(/* consume */
    hfi1_ipoib_tx_template, hfi1_tx_consume,
    TP_PROTO(struct ipoib_txreq *tx, u32 idx),
    TP_ARGS(tx, idx)
);

DEFINE_EVENT(/* alloc_tx */
    hfi1_ipoib_txq_template, hfi1_txq_alloc_tx,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

DEFINE_EVENT(/* poll */
    hfi1_ipoib_txq_template, hfi1_txq_poll,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

DEFINE_EVENT(/* complete */
    hfi1_ipoib_txq_template, hfi1_txq_complete,
    TP_PROTO(struct hfi1_ipoib_txq *txq),
    TP_ARGS(txq)
);

#endif /* __HFI1_TRACE_TX_H */
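
/*
 * Standard tracepoint trailer: point define_trace.h at this file so the
 * events above are instantiated exactly once, in the compilation unit
 * that defines CREATE_TRACE_POINTS (hence the TRACE_HEADER_MULTI_READ
 * guard at the top instead of a plain include guard).
 */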
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_tx
#include <trace/define_trace.h>