/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, 2018 Oracle. All rights reserved.
 *
 * Trace point definitions for the "rpcrdma" subsystem.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma

#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H

#include <linux/tracepoint.h>
#include <trace/events/rdma.h>

/**
 ** Event classes
 **/
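
/*
 * xprtrdma_reply_event is the common shape for events fired while
 * parsing a received transport header: it records the rpcrdma_rep,
 * its owning transport, and the XID, version, and procedure fields
 * already converted to host byte order.
 */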
DECLARE_EVENT_CLASS(xprtrdma_reply_event,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),
	TP_ARGS(rep),
	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(const void *, r_xprt)
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
	),
	TP_fast_assign(
		__entry->rep = rep;
		__entry->r_xprt = rep->rr_rxprt;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
	),
	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
		__entry->r_xprt, __entry->xid, __entry->rep,
		__entry->version, __entry->proc
	)
);

#define DEFINE_REPLY_EVENT(name) \
	DEFINE_EVENT(xprtrdma_reply_event, name, \
		TP_PROTO( \
			const struct rpcrdma_rep *rep \
		), \
		TP_ARGS(rep))
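
/*
 * Instances of this class are declared with DEFINE_REPLY_EVENT(name)
 * and fired via the generated trace_<name>() call. A call site looks
 * like this (an illustrative sketch, not code from this header):
 *
 *	trace_xprtrdma_reply_vers(rep);
 */
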
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),
	TP_ARGS(r_xprt),
	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

#define DEFINE_RXPRT_EVENT(name) \
	DEFINE_EVENT(xprtrdma_rxprt, name, \
		TP_PROTO( \
			const struct rpcrdma_xprt *r_xprt \
		), \
		TP_ARGS(r_xprt))
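
/*
 * The two chunk-registration classes below record one MR per event:
 * the printk format ends with "more" when this MR covers fewer
 * segments (mr_nents) than remain to be registered (nsegs), so
 * further MRs follow, and "last" otherwise.
 */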
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),
	TP_ARGS(task, pos, mr, nsegs),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, mr)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),
	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr = mr;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),
	TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id, __entry->mr,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name) \
	DEFINE_EVENT(xprtrdma_rdch_event, name, \
		TP_PROTO( \
			const struct rpc_task *task, \
			unsigned int pos, \
			struct rpcrdma_mr *mr, \
			int nsegs \
		), \
		TP_ARGS(task, pos, mr, nsegs))

DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),
	TP_ARGS(task, mr, nsegs),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, mr)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),
	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr = mr;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),
	TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id, __entry->mr,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name) \
	DEFINE_EVENT(xprtrdma_wrch_event, name, \
		TP_PROTO( \
			const struct rpc_task *task, \
			struct rpcrdma_mr *mr, \
			int nsegs \
		), \
		TP_ARGS(task, mr, nsegs))
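
/*
 * TRACE_DEFINE_ENUM() exports each FRWR state value so that the
 * __print_symbolic() table in xprtrdma_show_frwr_state() can be
 * decoded by user-space trace tooling.
 */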
TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
TRACE_DEFINE_ENUM(FRWR_IS_VALID);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);

#define xprtrdma_show_frwr_state(x) \
	__print_symbolic(x, \
		{ FRWR_IS_INVALID, "INVALID" }, \
		{ FRWR_IS_VALID, "VALID" }, \
		{ FRWR_FLUSHED_FR, "FLUSHED_FR" }, \
		{ FRWR_FLUSHED_LI, "FLUSHED_LI" })

DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpcrdma_frwr *frwr
	),
	TP_ARGS(wc, frwr),
	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(unsigned int, state)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),
	TP_fast_assign(
		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
		__entry->state = frwr->fr_state;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),
	TP_printk(
		"mr=%p state=%s: %s (%u/0x%x)",
		__entry->mr, xprtrdma_show_frwr_state(__entry->state),
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_FRWR_DONE_EVENT(name) \
	DEFINE_EVENT(xprtrdma_frwr_done, name, \
		TP_PROTO( \
			const struct ib_wc *wc, \
			const struct rpcrdma_frwr *frwr \
		), \
		TP_ARGS(wc, frwr))

DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),
	TP_ARGS(mr),
	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),
	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
	),
	TP_printk("mr=%p %u@0x%016llx:0x%08x",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_MR_EVENT(name) \
	DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
		TP_PROTO( \
			const struct rpcrdma_mr *mr \
		), \
		TP_ARGS(mr))
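
/*
 * Note the token pasting: DEFINE_MR_EVENT(map) declares the event
 * xprtrdma_mr_map, fired as trace_xprtrdma_mr_map(mr). The instances
 * themselves appear with the completion events below.
 */
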
DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),
	TP_ARGS(rqst),
	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),
	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),
	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

#define DEFINE_CB_EVENT(name) \
	DEFINE_EVENT(xprtrdma_cb_event, name, \
		TP_PROTO( \
			const struct rpc_rqst *rqst \
		), \
		TP_ARGS(rqst))

/**
 ** Connection events
 **/

TRACE_EVENT(xprtrdma_cm_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		struct rdma_cm_event *event
	),
	TP_ARGS(r_xprt, event),
	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__entry->status = event->status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);

TRACE_EVENT(xprtrdma_disconnect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int status
	),
	TP_ARGS(r_xprt, status),
	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, status)
		__field(int, connected)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->status = status;
		__entry->connected = r_xprt->rx_ep.rep_connected;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->status,
		__entry->connected == 1 ? "still " : "dis"
	)
);

DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);

TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct ib_event *event
	),
	TP_ARGS(r_xprt, event),
	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__string(name, event->device->name)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__assign_str(name, event->device->name);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__get_str(name), rdma_show_ib_event(__entry->event),
		__entry->event
	)
);

/**
 ** Call events
 **/

TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),
	TP_ARGS(r_xprt, count),
	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
	),
	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
	),
	TP_printk("r_xprt=%p: created %u MRs",
		__entry->r_xprt, __entry->count
	)
);

DEFINE_RXPRT_EVENT(xprtrdma_nomrs);

DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
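
/*
 * The chunk type enum values are likewise exported so that the rtype
 * and wtype arguments of xprtrdma_marshal below can be printed
 * symbolically by xprtrdma_show_chunktype().
 */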
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x) \
	__print_symbolic(x, \
		{ rpcrdma_noch, "inline" }, \
		{ rpcrdma_readch, "read list" }, \
		{ rpcrdma_areadch, "*read list" }, \
		{ rpcrdma_writech, "write list" }, \
		{ rpcrdma_replych, "reply chunk" })

TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned int hdrlen,
		unsigned int rtype,
		unsigned int wtype
	),
	TP_ARGS(rqst, hdrlen, rtype, wtype),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),
	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = hdrlen;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),
	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);

TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),
	TP_ARGS(req, status),
	TP_STRUCT__entry(
		__field(const void *, req)
		__field(int, num_sge)
		__field(int, signaled)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->req = req;
		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
				    IB_SEND_SIGNALED;
		__entry->status = status;
	),
	TP_printk("req=%p, %d SGEs%s, status=%d",
		__entry->req, __entry->num_sge,
		(__entry->signaled ? ", signaled" : ""),
		__entry->status
	)
);

TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct ib_cqe *cqe
	),
	TP_ARGS(cqe),
	TP_STRUCT__entry(
		__field(const void *, cqe)
	),
	TP_fast_assign(
		__entry->cqe = cqe;
	),
	TP_printk("cqe=%p",
		__entry->cqe
	)
);

TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),
	TP_ARGS(r_xprt, count, status),
	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_buf.rb_posted_receives;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);

/**
 ** Completion events
 **/
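
/*
 * Completion handlers record wc->status unconditionally, but sample
 * wc->vendor_err only when the status indicates an error, and (for
 * receives) wc->byte_len only on success; the unused field is logged
 * as zero.
 */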
TRACE_EVENT(xprtrdma_wc_send,
	TP_PROTO(
		const struct rpcrdma_sendctx *sc,
		const struct ib_wc *wc
	),
	TP_ARGS(sc, wc),
	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, unmap_count)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),
	TP_fast_assign(
		__entry->req = sc->sc_req;
		__entry->unmap_count = sc->sc_unmap_count;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),
	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
		__entry->req, __entry->unmap_count,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

TRACE_EVENT(xprtrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),
	TP_ARGS(wc),
	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),
	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),
	TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);

DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
DEFINE_MR_EVENT(unmap);
DEFINE_MR_EVENT(remoteinv);
DEFINE_MR_EVENT(recycle);

/**
 ** Reply events
 **/

TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),
	TP_ARGS(task, rep, req, credits),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),
	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),
	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);

TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),
	TP_ARGS(rep),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),
	TP_fast_assign(
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),
	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);

TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int len,
		int hdrlen
	),
	TP_ARGS(rqst, len, hdrlen),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, base)
		__field(int, len)
		__field(int, hdrlen)
	),
	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
		__entry->len = len;
		__entry->hdrlen = hdrlen;
	),
	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->base, __entry->len, __entry->hdrlen
	)
);

TRACE_EVENT(xprtrdma_fixup_pg,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int pageno,
		const void *pos,
		int len,
		int curlen
	),
	TP_ARGS(rqst, pageno, pos, len, curlen),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, pos)
		__field(int, pageno)
		__field(int, len)
		__field(int, curlen)
	),
	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->pageno = pageno;
		__entry->len = len;
		__entry->curlen = curlen;
	),
	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
	)
);

TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),
	TP_ARGS(handle, length, offset),
	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),
	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),
	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

/**
 ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
 **/

TRACE_EVENT(xprtrdma_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),
	TP_ARGS(task, req),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),
	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),
	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);

TRACE_EVENT(xprtrdma_rpc_done,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),
	TP_ARGS(task, req),
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),
	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),
	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);

/**
 ** Callback events
 **/

TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),
	TP_ARGS(r_xprt, reqs),
	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),
	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),
	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);

DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);

/**
 ** Server-side RPC/RDMA events
 **/

DECLARE_EVENT_CLASS(svcrdma_xprt_event,
	TP_PROTO(
		const struct svc_xprt *xprt
	),
	TP_ARGS(xprt),
	TP_STRUCT__entry(
		__field(const void *, xprt)
		__string(addr, xprt->xpt_remotebuf)
	),
	TP_fast_assign(
		__entry->xprt = xprt;
		__assign_str(addr, xprt->xpt_remotebuf);
	),
	TP_printk("xprt=%p addr=%s",
		__entry->xprt, __get_str(addr)
	)
);

#define DEFINE_XPRT_EVENT(name) \
	DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \
		TP_PROTO( \
			const struct svc_xprt *xprt \
		), \
		TP_ARGS(xprt))

DEFINE_XPRT_EVENT(accept);
DEFINE_XPRT_EVENT(fail);
DEFINE_XPRT_EVENT(free);
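
/*
 * The RPC-over-RDMA transport header procedure values are exported
 * below so that show_rpcrdma_proc() can print them symbolically.
 */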
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x) \
	__print_symbolic(x, \
		{ RDMA_MSG, "RDMA_MSG" }, \
		{ RDMA_NOMSG, "RDMA_NOMSG" }, \
		{ RDMA_MSGP, "RDMA_MSGP" }, \
		{ RDMA_DONE, "RDMA_DONE" }, \
		{ RDMA_ERROR, "RDMA_ERROR" })

TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		__be32 *p,
		unsigned int hdrlen
	),
	TP_ARGS(p, hdrlen),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),
	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

TRACE_EVENT(svcrdma_decode_short,
	TP_PROTO(
		unsigned int hdrlen
	),
	TP_ARGS(hdrlen),
	TP_STRUCT__entry(
		__field(unsigned int, hdrlen)
	),
	TP_fast_assign(
		__entry->hdrlen = hdrlen;
	),
	TP_printk("hdrlen=%u", __entry->hdrlen)
);

DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		__be32 *p
	),
	TP_ARGS(p),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),
	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

#define DEFINE_BADREQ_EVENT(name) \
	DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name, \
		TP_PROTO( \
			__be32 *p \
		), \
		TP_ARGS(p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);

DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),
	TP_ARGS(handle, length, offset),
	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),
	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),
	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_SEGMENT_EVENT(name) \
	DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name, \
		TP_PROTO( \
			u32 handle, \
			u32 length, \
			u64 offset \
		), \
		TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(rseg);
DEFINE_SEGMENT_EVENT(wseg);

DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),
	TP_ARGS(length),
	TP_STRUCT__entry(
		__field(u32, length)
	),
	TP_fast_assign(
		__entry->length = length;
	),
	TP_printk("length=%u",
		__entry->length
	)
);

#define DEFINE_CHUNK_EVENT(name) \
	DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name, \
		TP_PROTO( \
			u32 length \
		), \
		TP_ARGS(length))

DEFINE_CHUNK_EVENT(pzr);
DEFINE_CHUNK_EVENT(write);
DEFINE_CHUNK_EVENT(reply);

TRACE_EVENT(svcrdma_encode_read,
	TP_PROTO(
		u32 length,
		u32 position
	),
	TP_ARGS(length, position),
	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),
	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),
	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);

DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),
	TP_ARGS(xid),
	TP_STRUCT__entry(
		__field(u32, xid)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),
	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

#define DEFINE_ERROR_EVENT(name) \
	DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \
		TP_PROTO( \
			__be32 xid \
		), \
		TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/

TRACE_EVENT(svcrdma_dma_map_page,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const void *page
	),
	TP_ARGS(rdma, page),
	TP_STRUCT__entry(
		__field(const void *, page)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),
	TP_fast_assign(
		__entry->page = page;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),
	TP_printk("addr=%s device=%s page=%p",
		__get_str(addr), __get_str(device), __entry->page
	)
);

TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),
	TP_ARGS(rdma, status),
	TP_STRUCT__entry(
		__field(int, status)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),
	TP_fast_assign(
		__entry->status = status;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),
	TP_printk("addr=%s device=%s status=%d",
		__get_str(addr), __get_str(device), __entry->status
	)
);

TRACE_EVENT(svcrdma_send_failed,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),
	TP_ARGS(rqst, status),
	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__field(const void *, xprt)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),
	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__entry->xprt = rqst->rq_xprt;
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),
	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
		__entry->xprt, __get_str(addr),
		__entry->xid, __entry->status
	)
);

DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
	TP_PROTO(
		const struct ib_wc *wc
	),
	TP_ARGS(wc),
	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),
	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),
	TP_printk("cqe=%p status=%s (%u/0x%x)",
		__entry->cqe, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_SENDCOMP_EVENT(name) \
	DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \
		TP_PROTO( \
			const struct ib_wc *wc \
		), \
		TP_ARGS(wc))
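
/*
 * DEFINE_SENDCOMP_EVENT(send) below, and the read/write instances
 * further down, share svcrdma_sendcomp_event. A completion handler
 * fires one as, for example (an illustrative sketch):
 *
 *	trace_svcrdma_wc_send(wc);
 */
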
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct ib_send_wr *wr,
		int status
	),
	TP_ARGS(wr, status),
	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
		__entry->status = status;
	),
	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
		__entry->cqe, __entry->num_sge,
		__entry->inv_rkey, __entry->status
	)
);

DEFINE_SENDCOMP_EVENT(send);

TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct ib_recv_wr *wr,
		int status
	),
	TP_ARGS(wr, status),
	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->status = status;
	),
	TP_printk("cqe=%p status=%d",
		__entry->cqe, __entry->status
	)
);

TRACE_EVENT(svcrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),
	TP_ARGS(wc),
	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),
	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),
	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

TRACE_EVENT(svcrdma_post_rw,
	TP_PROTO(
		const void *cqe,
		int sqecount,
		int status
	),
	TP_ARGS(cqe, sqecount, status),
	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, sqecount)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->cqe = cqe;
		__entry->sqecount = sqecount;
		__entry->status = status;
	),
	TP_printk("cqe=%p sqecount=%d status=%d",
		__entry->cqe, __entry->sqecount, __entry->status
	)
);

DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);

TRACE_EVENT(svcrdma_cm_event,
	TP_PROTO(
		const struct rdma_cm_event *event,
		const struct sockaddr *sap
	),
	TP_ARGS(event, sap),
	TP_STRUCT__entry(
		__field(unsigned int, event)
		__field(int, status)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),
	TP_fast_assign(
		__entry->event = event->event;
		__entry->status = event->status;
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),
	TP_printk("addr=%s event=%s (%u/%d)",
		__entry->addr,
		rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);

TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),
	TP_ARGS(event, sap),
	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),
	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),
	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),
	TP_ARGS(rdma),
	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),
	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),
	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

#define DEFINE_SQ_EVENT(name) \
	DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name, \
		TP_PROTO( \
			const struct svcxprt_rdma *rdma \
		), \
		TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);

#endif /* _TRACE_RPCRDMA_H */

#include <trace/define_trace.h>