  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright (c) 2017, 2018 Oracle. All rights reserved.
  4. *
  5. * Trace point definitions for the "rpcrdma" subsystem.
  6. */
  7. #undef TRACE_SYSTEM
  8. #define TRACE_SYSTEM rpcrdma
  9. #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
  10. #define _TRACE_RPCRDMA_H
  11. #include <linux/scatterlist.h>
  12. #include <linux/sunrpc/rpc_rdma_cid.h>
  13. #include <linux/tracepoint.h>
  14. #include <rdma/ib_cm.h>
  15. #include <trace/events/rdma.h>
  16. #include <trace/events/sunrpc_base.h>
/**
 ** Event classes
 **/
/* Generic RDMA work-completion event: records the CQ id, the
 * per-queue completion id, and the WC status. The hardware
 * vendor_err is captured only when status is non-zero (it is
 * meaningless on success).
 */
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),
	TP_ARGS(wc, cid),
	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),
	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),
	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate a named event in rpcrdma_completion_class. */
#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
/* Send completion event that records only the CQ id and completion
 * id — used where WC status is reported by a separate flush event.
 */
DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),
	TP_ARGS(wc, cid),
	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),
	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
	),
	TP_printk("cq.id=%u cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

/* Instantiate a named event in rpcrdma_send_completion_class. */
#define DEFINE_SEND_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_send_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
/* Flushed-send event: unlike the generic completion class, the
 * vendor_err is copied unconditionally because a flush implies a
 * non-success status.
 */
DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),
	TP_ARGS(wc, cid),
	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),
	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),
	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate a named event in rpcrdma_send_flush_class. */
#define DEFINE_SEND_FLUSH_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_send_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
/* MR-related work completion: same capture logic as the generic
 * completion class, but the completion id is displayed as "mr.id="
 * in the trace output.
 */
DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),
	TP_ARGS(wc, cid),
	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),
	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),
	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate a named event in rpcrdma_mr_completion_class. */
#define DEFINE_MR_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
/* Receive completion: in addition to status/vendor_err, records the
 * number of bytes received. On error, byte_len is not valid, so
 * "received" is forced to zero; on success, vendor_err is zeroed.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),
	TP_ARGS(wc, cid),
	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),
	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->received = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->received = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),
	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err,
		__entry->received
	)
);

/* Instantiate a named event in rpcrdma_receive_completion_class. */
#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
/* Successful receive: records only ids and the received byte count;
 * byte_len is read unconditionally (callers fire this only on
 * success).
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),
	TP_ARGS(wc, cid),
	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
	),
	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->received = wc->byte_len;
	),
	TP_printk("cq.id=%u cid=%d received=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->received
	)
);

/* Instantiate a named event in rpcrdma_receive_success_class. */
#define DEFINE_RECEIVE_SUCCESS_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_success_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
/* Flushed receive: status and vendor_err copied unconditionally (a
 * flush implies failure, so no byte count is recorded).
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),
	TP_ARGS(wc, cid),
	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),
	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),
	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate a named event in rpcrdma_receive_flush_class. */
#define DEFINE_RECEIVE_FLUSH_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
/* RPC/RDMA reply event: decodes the XID, protocol version, and proc
 * from the received reply header, plus the peer's address/port
 * strings.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),
	TP_ARGS(rep),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),
	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);

/* Instantiate xprtrdma_reply_<name>_err in xprtrdma_reply_class. */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
/* Minimal transport event: records only the peer address and port
 * of the rpcrdma transport.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),
	TP_ARGS(r_xprt),
	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s",
		__get_str(addr), __get_str(port)
	)
);

/* Instantiate a named event in xprtrdma_rxprt. */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
/* Connection state-change event: records the operation's return code
 * and the endpoint's current connect status, plus the peer
 * address/port.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),
	TP_ARGS(r_xprt, rc),
	TP_STRUCT__entry(
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
		__get_str(addr), __get_str(port),
		__entry->rc, __entry->connect_status
	)
);

/* Instantiate xprtrdma_<name> in xprtrdma_connect_class. */
#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
/* Read-chunk registration event: logs the task, XDR position of the
 * chunk, the MR's handle/length/offset, and whether more segments
 * follow (nents < nsegs => "more", otherwise "last").
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),
	TP_ARGS(task, pos, mr, nsegs),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),
	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),
	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		" pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate xprtrdma_chunk_<name> in xprtrdma_rdch_event. */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
/* Write/reply-chunk registration event: like xprtrdma_rdch_event but
 * without an XDR position (write and reply chunks have none).
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),
	TP_ARGS(task, mr, nsegs),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),
	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),
	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		" %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate xprtrdma_chunk_<name> in xprtrdma_wrch_event. */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
/* Export the DMA direction enum values to user space so the symbolic
 * printer below resolves correctly in trace output.
 */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

/* Render a dma_data_direction value as a short label. */
#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
/* MR event tied (when possible) to an RPC task: if the MR is bound
 * to a request, the owning task/client ids are recorded; otherwise
 * task_id=0 and client_id=-1 mark it as unowned. Also records the
 * MR's kernel resource id, sg count, handle, length, offset, and
 * DMA direction.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),
	TP_ARGS(mr),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),
	TP_fast_assign(
		const struct rpcrdma_req *req = mr->mr_req;

		if (req) {
			const struct rpc_task *task = req->rl_slot.rq_task;

			__entry->task_id = task->tk_pid;
			__entry->client_id = task->tk_client->cl_clid;
		} else {
			/* MR not currently associated with a request */
			__entry->task_id = 0;
			__entry->client_id = -1;
		}
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
	),
	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		" mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate xprtrdma_mr_<name> in xprtrdma_mr_class. */
#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
/* MR event without task association: same MR fields as
 * xprtrdma_mr_class, but no task/client ids.
 */
DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),
	TP_ARGS(mr),
	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),
	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
	),
	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate xprtrdma_mr_<name> in xprtrdma_anonymous_mr_class. */
#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
/* Backchannel callback event: records the request's XID and the
 * peer address/port.
 */
DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),
	TP_ARGS(r_xprt, rqst),
	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);

/* Instantiate xprtrdma_cb_<name> in xprtrdma_callback_class. */
#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))
/**
 ** Connection events
 **/
/* Records the negotiated and calculated inline send/receive
 * thresholds for an endpoint, along with the raw source and
 * destination socket addresses taken from the rdma_cm_id.
 */
TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),
	TP_ARGS(ep),
	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),
	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
			sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
			sizeof(struct sockaddr_in6));
	),
	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);

/* Connection lifetime events. */
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
/* Records a connect attempt and its scheduling delay (in jiffies). */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),
	TP_ARGS(r_xprt, delay),
	TP_STRUCT__entry(
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s delay=%lu",
		__get_str(addr), __get_str(port), __entry->delay
	)
);
/* Records new connect/reconnect timeout values; stored in jiffies
 * but printed in seconds (divided by HZ).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),
	TP_ARGS(r_xprt, connect, reconnect),
	TP_STRUCT__entry(
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port),
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);
/**
 ** Call events
 **/
/* Records how many MRs were created for a transport. */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),
	TP_ARGS(r_xprt, count),
	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),
	TP_fast_assign(
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s created %u MRs",
		__get_str(addr), __get_str(port), __entry->count
	)
);
/* Fired when a request cannot obtain MRs; records the owning task
 * and client ids plus the peer address/port.
 */
TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),
	TP_ARGS(r_xprt, req),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
		__entry->task_id, __entry->client_id,
		__get_str(addr), __get_str(port)
	)
);
/* Chunk registration events. */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
DEFINE_WRCH_EVENT(wp);

/* Export the chunk-type enum values for symbolic display. */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

/* Render a chunk type as a human-readable label. */
#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
/* Records the result of marshaling an RPC call: transport header
 * length, the send buffer's head/page/tail lengths, and the chosen
 * read (rtype) and write (wtype) chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),
	TP_ARGS(req, rtype, wtype),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),
	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),
	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		" xid=0x%08x hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
/* Records a marshaling failure: task/client ids, XID, and the error
 * return code.
 */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),
	TP_ARGS(rqst, ret),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),
	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),
	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
/* Records a failure while preparing a Send: same fields as
 * xprtrdma_marshal_failed (task/client ids, XID, return code).
 */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),
	TP_ARGS(rqst, ret),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),
	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),
	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
/*
 * Fired when a Send WR is posted for an outgoing RPC Call.
 * Records the send completion ID, the posting task, the SGE count,
 * and whether the WR requests a signaled completion.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		/* tk_client can be NULL (e.g. no rpc_clnt attached): report -1 */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);
/*
 * Fired when posting a Send WR fails; rc is the negative errno
 * returned by the post operation.
 */
TRACE_EVENT(xprtrdma_post_send_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req,
		int rc
	),

	TP_ARGS(r_xprt, req, rc),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, rc)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		/* NOTE(review): this reads the *recv* CQ id for a Send error —
		 * confirm send_cq was not intended. Endpoint may already be
		 * gone, hence the NULL check. */
		__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
		__entry->task_id = rqst->rq_task->tk_pid;
		/* tk_client can be NULL here: report -1 */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->rc = rc;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u rc=%d",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->rc
	)
);
  819. TRACE_EVENT(xprtrdma_post_recv,
  820. TP_PROTO(
  821. const struct rpcrdma_rep *rep
  822. ),
  823. TP_ARGS(rep),
  824. TP_STRUCT__entry(
  825. __field(u32, cq_id)
  826. __field(int, completion_id)
  827. ),
  828. TP_fast_assign(
  829. __entry->cq_id = rep->rr_cid.ci_queue_id;
  830. __entry->completion_id = rep->rr_cid.ci_completion_id;
  831. ),
  832. TP_printk("cq.id=%d cid=%d",
  833. __entry->cq_id, __entry->completion_id
  834. )
  835. );
/*
 * Fired after a batch of Receive WRs is posted; count is the number
 * of new WRs requested and posted is the endpoint's resulting count
 * of active receives.
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, count)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		__entry->cq_id = ep->re_attr.recv_cq->res.id;
		__entry->count = count;
		__entry->posted = ep->re_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	/* NOTE(review): cq_id is u32 but printed with %d — consider %u */
	TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
		__get_str(addr), __get_str(port), __entry->cq_id,
		__entry->count, __entry->posted
	)
);

/*
 * Fired when posting a batch of Receive WRs fails; status is the
 * negative errno from the post operation.
 */
TRACE_EVENT(xprtrdma_post_recvs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int status
	),

	TP_ARGS(r_xprt, status),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		__entry->cq_id = ep->re_attr.recv_cq->res.id;
		__entry->status = status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
		__get_str(addr), __get_str(port), __entry->cq_id,
		__entry->status
	)
);
/*
 * Fired when posting a LOCAL_INV WR fails; status is the negative
 * errno from the post operation.
 */
TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);
  907. /**
  908. ** Completion events
  909. **/
/* Client-side WC (work completion) events, built from the event
 * classes declared earlier in this file. */
DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
/*
 * Fired when allocating an FRWR memory region fails or succeeds;
 * rc is the result of the allocation attempt.
 */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);

/*
 * Fired when deregistering an FRWR MR; records the MR's mapping
 * (handle/length/offset/direction) and the deregistration result.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);

/*
 * Fired when ib_map_mr_sg() cannot map the requested scatterlist;
 * sg_nents is the number of SG entries that were requested.
 */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);

/*
 * Fired when fewer SG entries were mapped than requested;
 * num_mapped is how many entries actually mapped, out of mr_nents.
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
/* MR state-transition events, built from the MR event classes
 * declared earlier in this file. */
DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);
  1020. TRACE_EVENT(xprtrdma_dma_maperr,
  1021. TP_PROTO(
  1022. u64 addr
  1023. ),
  1024. TP_ARGS(addr),
  1025. TP_STRUCT__entry(
  1026. __field(u64, addr)
  1027. ),
  1028. TP_fast_assign(
  1029. __entry->addr = addr;
  1030. ),
  1031. TP_printk("dma addr=0x%llx\n", __entry->addr)
  1032. );
  1033. /**
  1034. ** Reply events
  1035. **/
/*
 * Fired when an RPC Reply arrives and is matched to its Call;
 * credits is the credit grant carried in the reply's transport header.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);

/* Reply-processing anomaly events, built from the reply event class
 * declared earlier in this file. */
DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);
/*
 * Fired when the server reports ERR_VERS: the peer supports only
 * RPC-over-RDMA versions in [min, max].
 */
TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);

/*
 * Fired when the server reports ERR_CHUNK for this request's XID.
 */
TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);
  1109. TRACE_EVENT(xprtrdma_err_unrecognized,
  1110. TP_PROTO(
  1111. const struct rpc_rqst *rqst,
  1112. __be32 *procedure
  1113. ),
  1114. TP_ARGS(rqst, procedure),
  1115. TP_STRUCT__entry(
  1116. __field(unsigned int, task_id)
  1117. __field(unsigned int, client_id)
  1118. __field(u32, xid)
  1119. __field(u32, procedure)
  1120. ),
  1121. TP_fast_assign(
  1122. __entry->task_id = rqst->rq_task->tk_pid;
  1123. __entry->client_id = rqst->rq_task->tk_client->cl_clid;
  1124. __entry->procedure = be32_to_cpup(procedure);
  1125. ),
  1126. TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x procedure=%u",
  1127. __entry->task_id, __entry->client_id, __entry->xid,
  1128. __entry->procedure
  1129. )
  1130. );
/*
 * Fired after reply payload data is copied ("fixed up") into the
 * receive xdr_buf; fixup is the number of bytes that were copied.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);

/*
 * Fired for each chunk segment decoded from a reply's transport
 * header: an RDMA handle/length/offset triple.
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

/*
 * Fired when a task forces its registered MRs to be released.
 */
TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
		__entry->task_id, __entry->client_id
	)
);
  1197. /**
  1198. ** Callback events
  1199. **/
/*
 * Fired when backchannel resources are set up on this transport;
 * reqs is the number of backchannel requests provisioned.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s %u reqs",
		__get_str(addr), __get_str(port), __entry->reqs
	)
);

/* Backchannel call/reply events, from the callback event class
 * declared earlier in this file. */
DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);
  1222. /**
  1223. ** Server-side RPC/RDMA events
  1224. **/
/*
 * Event class for failures during server-side connection accept;
 * status is the error value from the failed accept step.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);

/* Instantiates svcrdma_<name>_err from the accept class above. */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
/* Export the RPC-over-RDMA procedure values to userspace trace
 * tooling so the symbolic names below resolve correctly. */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Pretty-print an RPC-over-RDMA procedure value. */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
/*
 * Fired when an incoming RPC-over-RDMA transport header is decoded;
 * p points at the header's first word (xid, vers, credits, proc in
 * that order) and hdrlen is the decoded header length in bytes.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* Header word order: xid, vers, credits, proc */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

/*
 * Fired when a received message is too short to contain a complete
 * transport header; hdrlen is the number of bytes received.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
/*
 * Event class for malformed incoming requests; p points at the
 * transport header words (xid, vers, credits, proc).
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* Header word order: xid, vers, credits, proc */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

/* Instantiates svcrdma_decode_<name>_err from the badreq class above. */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
/*
 * Fired for each Write chunk segment encoded into an outgoing reply;
 * segno is the segment's index within its chunk.
 */
TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

/*
 * Fired for each Read chunk segment decoded from an incoming request.
 */
TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		/* NOTE(review): "segno" is filled with the chunk's segment
		 * count (ch_segcount), not a per-segment index — confirm
		 * this is the intended reading. */
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);

/*
 * Fired for each Write chunk segment decoded from an incoming
 * request; segno indexes into the chunk's segment array.
 */
TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
/*
 * Event class for sending an RDMA_ERROR response; xid identifies
 * the request being rejected.
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

/* Instantiates svcrdma_err_<name> from the error class above. */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
  1471. /**
  1472. ** Server-side RDMA API events
  1473. **/
/*
 * Event class for server-side DMA map/unmap activity; records the
 * mapped address and length plus device and peer identifiers.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

/* Instantiates svcrdma_<name> from the DMA map class above. */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
/*
 * Fired when DMA-mapping a scatterlist for an RDMA Read/Write fails;
 * nents is the number of SG entries, status the error value.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);

/*
 * Fired when no R/W context is available for the requested number
 * of SGEs.
 */
TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int, num_sges)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->num_sges = num_sges;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s num_sges=%d",
		__get_str(addr), __get_str(device), __entry->num_sges
	)
);

/*
 * Fired when constructing a reply would run past the pages available
 * in the svc_rqst; pageno is the offending page index.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);

/*
 * Fired when a client-provided Write chunk is too small for the
 * reply payload: remaining bytes did not fit at segment seg_no of
 * num_segs.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
  1603. TRACE_EVENT(svcrdma_send_pullup,
  1604. TP_PROTO(
  1605. const struct svc_rdma_send_ctxt *ctxt,
  1606. unsigned int msglen
  1607. ),
  1608. TP_ARGS(ctxt, msglen),
  1609. TP_STRUCT__entry(
  1610. __field(u32, cq_id)
  1611. __field(int, completion_id)
  1612. __field(unsigned int, hdrlen)
  1613. __field(unsigned int, msglen)
  1614. ),
  1615. TP_fast_assign(
  1616. __entry->cq_id = ctxt->sc_cid.ci_queue_id;
  1617. __entry->completion_id = ctxt->sc_cid.ci_completion_id;
  1618. __entry->hdrlen = ctxt->sc_hdrbuf.len,
  1619. __entry->msglen = msglen;
  1620. ),
  1621. TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
  1622. __entry->cq_id, __entry->completion_id,
  1623. __entry->hdrlen, __entry->msglen,
  1624. __entry->hdrlen + __entry->msglen)
  1625. );
/*
 * Fired when sending an RPC reply fails; status is the negative
 * errno from the send path.
 */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);

/*
 * Fired when a reply's Send WR is posted. inv_rkey is the rkey to
 * be remotely invalidated, or 0 when plain IB_WR_SEND is used.
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);
/* Server-side Send completion events (success / flushed / error). */
DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
  1673. TRACE_EVENT(svcrdma_post_recv,
  1674. TP_PROTO(
  1675. const struct svc_rdma_recv_ctxt *ctxt
  1676. ),
  1677. TP_ARGS(ctxt),
  1678. TP_STRUCT__entry(
  1679. __field(u32, cq_id)
  1680. __field(int, completion_id)
  1681. ),
  1682. TP_fast_assign(
  1683. __entry->cq_id = ctxt->rc_cid.ci_queue_id;
  1684. __entry->completion_id = ctxt->rc_cid.ci_completion_id;
  1685. ),
  1686. TP_printk("cq.id=%d cid=%d",
  1687. __entry->cq_id, __entry->completion_id
  1688. )
  1689. );
/* Server-side Receive completion events (success / flushed / error). */
DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);

/*
 * Fired when posting to the Receive queue fails; status is the
 * negative errno from the post operation.
 */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
/* Event class for posting an RDMA chunk (Read/Write/Reply); records the
 * completion ID of the chain and how many Send Queue entries it consumed.
 */
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);
/* Instantiate one svcrdma_post_<name>_chunk event per chunk type from
 * the svcrdma_post_chunk_class above.
 */
#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);
/* Fires on successful completion of an RDMA Read chain; reports the
 * total payload size and the post-to-completion latency in microseconds
 * (computed here from @posttime, the ktime captured when the chain was
 * posted).
 */
TRACE_EVENT(svcrdma_wc_read,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid,
		unsigned int totalbytes,
		const ktime_t posttime
	),

	TP_ARGS(wc, cid, totalbytes, posttime),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(s64, read_latency)
		__field(unsigned int, totalbytes)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->totalbytes = totalbytes;
		/* Latency measured at assign time, not at TP_printk time */
		__entry->read_latency = ktime_us_delta(ktime_get(), posttime);
	),

	TP_printk("cq.id=%u cid=%d totalbytes=%u latency-us=%lld",
		__entry->cq_id, __entry->completion_id,
		__entry->totalbytes, __entry->read_latency
	)
);
/* Read/Write completion flush and error variants reuse the Send
 * completion event classes; the wire payload they record is the same
 * (cid + wc status). NOTE(review): dedicated Read/Write classes would
 * make the event names self-describing — confirm against the class
 * definitions earlier in this file.
 */
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
  1773. TRACE_EVENT(svcrdma_qp_error,
  1774. TP_PROTO(
  1775. const struct ib_event *event,
  1776. const struct sockaddr *sap
  1777. ),
  1778. TP_ARGS(event, sap),
  1779. TP_STRUCT__entry(
  1780. __field(unsigned int, event)
  1781. __string(device, event->device->name)
  1782. __array(__u8, addr, INET6_ADDRSTRLEN + 10)
  1783. ),
  1784. TP_fast_assign(
  1785. __entry->event = event->event;
  1786. __assign_str(device, event->device->name);
  1787. snprintf(__entry->addr, sizeof(__entry->addr) - 1,
  1788. "%pISpc", sap);
  1789. ),
  1790. TP_printk("addr=%s dev=%s event=%s (%u)",
  1791. __entry->addr, __get_str(device),
  1792. rdma_show_ib_event(__entry->event), __entry->event
  1793. )
  1794. );
/* Event class for Send Queue accounting events; snapshots the available
 * SQ entry count against the configured SQ depth for the transport.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
/* Instantiate svcrdma_sq_<name> events from svcrdma_sendqueue_event:
 * "full" when the SQ has no room, "retry" when a post is retried.
 */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
/* Fires when posting to the Send Queue fails; captures the negative
 * status code along with the SQ occupancy snapshot (avail/depth) and
 * the remote peer's address string.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
  1845. #endif /* _TRACE_RPCRDMA_H */
  1846. #include <trace/define_trace.h>