// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2013 Xenia Ragiadakou
 *
 * Author: Xenia Ragiadakou
 * Email : burzalodowa@gmail.com
 */
  10. #undef TRACE_SYSTEM
  11. #define TRACE_SYSTEM xhci-hcd
  12. /*
  13. * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
  14. * legitimate C variable. It is not exported to user space.
  15. */
  16. #undef TRACE_SYSTEM_VAR
  17. #define TRACE_SYSTEM_VAR xhci_hcd
  18. #if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
  19. #define __XHCI_TRACE_H
  20. #include <linux/tracepoint.h>
  21. #include "xhci.h"
  22. #include "xhci-dbgcap.h"
  23. #define XHCI_MSG_MAX 500
  24. DECLARE_EVENT_CLASS(xhci_log_msg,
  25. TP_PROTO(struct va_format *vaf),
  26. TP_ARGS(vaf),
  27. TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
  28. TP_fast_assign(
  29. vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
  30. ),
  31. TP_printk("%s", __get_str(msg))
  32. );
  33. DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
  34. TP_PROTO(struct va_format *vaf),
  35. TP_ARGS(vaf)
  36. );
  37. DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
  38. TP_PROTO(struct va_format *vaf),
  39. TP_ARGS(vaf)
  40. );
  41. DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
  42. TP_PROTO(struct va_format *vaf),
  43. TP_ARGS(vaf)
  44. );
  45. DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
  46. TP_PROTO(struct va_format *vaf),
  47. TP_ARGS(vaf)
  48. );
  49. DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
  50. TP_PROTO(struct va_format *vaf),
  51. TP_ARGS(vaf)
  52. );
  53. DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
  54. TP_PROTO(struct va_format *vaf),
  55. TP_ARGS(vaf)
  56. );
  57. DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
  58. TP_PROTO(struct va_format *vaf),
  59. TP_ARGS(vaf)
  60. );
  61. DECLARE_EVENT_CLASS(xhci_log_ctx,
  62. TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
  63. unsigned int ep_num),
  64. TP_ARGS(xhci, ctx, ep_num),
  65. TP_STRUCT__entry(
  66. __field(int, ctx_64)
  67. __field(unsigned, ctx_type)
  68. __field(dma_addr_t, ctx_dma)
  69. __field(u8 *, ctx_va)
  70. __field(unsigned, ctx_ep_num)
  71. __field(int, slot_id)
  72. __dynamic_array(u32, ctx_data,
  73. ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
  74. ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
  75. ),
  76. TP_fast_assign(
  77. struct usb_device *udev;
  78. udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
  79. __entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
  80. __entry->ctx_type = ctx->type;
  81. __entry->ctx_dma = ctx->dma;
  82. __entry->ctx_va = ctx->bytes;
  83. __entry->slot_id = udev->slot_id;
  84. __entry->ctx_ep_num = ep_num;
  85. memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
  86. ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
  87. ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
  88. ),
  89. TP_printk("ctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
  90. __entry->ctx_64, __entry->ctx_type,
  91. (unsigned long long) __entry->ctx_dma, __entry->ctx_va
  92. )
  93. );
  94. DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
  95. TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
  96. unsigned int ep_num),
  97. TP_ARGS(xhci, ctx, ep_num)
  98. );
  99. DECLARE_EVENT_CLASS(xhci_log_trb,
  100. TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
  101. TP_ARGS(ring, trb),
  102. TP_STRUCT__entry(
  103. __field(u32, type)
  104. __field(u32, field0)
  105. __field(u32, field1)
  106. __field(u32, field2)
  107. __field(u32, field3)
  108. ),
  109. TP_fast_assign(
  110. __entry->type = ring->type;
  111. __entry->field0 = le32_to_cpu(trb->field[0]);
  112. __entry->field1 = le32_to_cpu(trb->field[1]);
  113. __entry->field2 = le32_to_cpu(trb->field[2]);
  114. __entry->field3 = le32_to_cpu(trb->field[3]);
  115. ),
  116. TP_printk("%s: %s", xhci_ring_type_string(__entry->type),
  117. xhci_decode_trb(__entry->field0, __entry->field1,
  118. __entry->field2, __entry->field3)
  119. )
  120. );
  121. DEFINE_EVENT(xhci_log_trb, xhci_handle_event,
  122. TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
  123. TP_ARGS(ring, trb)
  124. );
  125. DEFINE_EVENT(xhci_log_trb, xhci_handle_command,
  126. TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
  127. TP_ARGS(ring, trb)
  128. );
  129. DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer,
  130. TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
  131. TP_ARGS(ring, trb)
  132. );
  133. DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
  134. TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
  135. TP_ARGS(ring, trb)
  136. );
  137. DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event,
  138. TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
  139. TP_ARGS(ring, trb)
  140. );
  141. DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer,
  142. TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
  143. TP_ARGS(ring, trb)
  144. );
  145. DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
  146. TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
  147. TP_ARGS(ring, trb)
  148. );
  149. DECLARE_EVENT_CLASS(xhci_log_virt_dev,
  150. TP_PROTO(struct xhci_virt_device *vdev),
  151. TP_ARGS(vdev),
  152. TP_STRUCT__entry(
  153. __field(void *, vdev)
  154. __field(unsigned long long, out_ctx)
  155. __field(unsigned long long, in_ctx)
  156. __field(int, devnum)
  157. __field(int, state)
  158. __field(int, speed)
  159. __field(u8, portnum)
  160. __field(u8, level)
  161. __field(int, slot_id)
  162. ),
  163. TP_fast_assign(
  164. __entry->vdev = vdev;
  165. __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
  166. __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
  167. __entry->devnum = vdev->udev->devnum;
  168. __entry->state = vdev->udev->state;
  169. __entry->speed = vdev->udev->speed;
  170. __entry->portnum = vdev->udev->portnum;
  171. __entry->level = vdev->udev->level;
  172. __entry->slot_id = vdev->udev->slot_id;
  173. ),
  174. TP_printk("vdev %p ctx %llx | %llx num %d state %d speed %d port %d level %d slot %d",
  175. __entry->vdev, __entry->in_ctx, __entry->out_ctx,
  176. __entry->devnum, __entry->state, __entry->speed,
  177. __entry->portnum, __entry->level, __entry->slot_id
  178. )
  179. );
  180. DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
  181. TP_PROTO(struct xhci_virt_device *vdev),
  182. TP_ARGS(vdev)
  183. );
  184. DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
  185. TP_PROTO(struct xhci_virt_device *vdev),
  186. TP_ARGS(vdev)
  187. );
  188. DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
  189. TP_PROTO(struct xhci_virt_device *vdev),
  190. TP_ARGS(vdev)
  191. );
  192. DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_addressable_virt_device,
  193. TP_PROTO(struct xhci_virt_device *vdev),
  194. TP_ARGS(vdev)
  195. );
  196. DEFINE_EVENT(xhci_log_virt_dev, xhci_stop_device,
  197. TP_PROTO(struct xhci_virt_device *vdev),
  198. TP_ARGS(vdev)
  199. );
  200. DECLARE_EVENT_CLASS(xhci_log_urb,
  201. TP_PROTO(struct urb *urb),
  202. TP_ARGS(urb),
  203. TP_STRUCT__entry(
  204. __field(void *, urb)
  205. __field(unsigned int, pipe)
  206. __field(unsigned int, stream)
  207. __field(int, status)
  208. __field(unsigned int, flags)
  209. __field(int, num_mapped_sgs)
  210. __field(int, num_sgs)
  211. __field(int, length)
  212. __field(int, actual)
  213. __field(int, epnum)
  214. __field(int, dir_in)
  215. __field(int, type)
  216. __field(int, slot_id)
  217. ),
  218. TP_fast_assign(
  219. __entry->urb = urb;
  220. __entry->pipe = urb->pipe;
  221. __entry->stream = urb->stream_id;
  222. __entry->status = urb->status;
  223. __entry->flags = urb->transfer_flags;
  224. __entry->num_mapped_sgs = urb->num_mapped_sgs;
  225. __entry->num_sgs = urb->num_sgs;
  226. __entry->length = urb->transfer_buffer_length;
  227. __entry->actual = urb->actual_length;
  228. __entry->epnum = usb_endpoint_num(&urb->ep->desc);
  229. __entry->dir_in = usb_endpoint_dir_in(&urb->ep->desc);
  230. __entry->type = usb_endpoint_type(&urb->ep->desc);
  231. __entry->slot_id = urb->dev->slot_id;
  232. ),
  233. TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
  234. __entry->epnum, __entry->dir_in ? "in" : "out",
  235. ({ char *s;
  236. switch (__entry->type) {
  237. case USB_ENDPOINT_XFER_INT:
  238. s = "intr";
  239. break;
  240. case USB_ENDPOINT_XFER_CONTROL:
  241. s = "control";
  242. break;
  243. case USB_ENDPOINT_XFER_BULK:
  244. s = "bulk";
  245. break;
  246. case USB_ENDPOINT_XFER_ISOC:
  247. s = "isoc";
  248. break;
  249. default:
  250. s = "UNKNOWN";
  251. } s; }), __entry->urb, __entry->pipe, __entry->slot_id,
  252. __entry->actual, __entry->length, __entry->num_mapped_sgs,
  253. __entry->num_sgs, __entry->stream, __entry->flags
  254. )
  255. );
  256. DEFINE_EVENT(xhci_log_urb, xhci_urb_enqueue,
  257. TP_PROTO(struct urb *urb),
  258. TP_ARGS(urb)
  259. );
  260. DEFINE_EVENT(xhci_log_urb, xhci_urb_giveback,
  261. TP_PROTO(struct urb *urb),
  262. TP_ARGS(urb)
  263. );
  264. DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue,
  265. TP_PROTO(struct urb *urb),
  266. TP_ARGS(urb)
  267. );
  268. DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
  269. TP_PROTO(struct xhci_ep_ctx *ctx),
  270. TP_ARGS(ctx),
  271. TP_STRUCT__entry(
  272. __field(u32, info)
  273. __field(u32, info2)
  274. __field(u64, deq)
  275. __field(u32, tx_info)
  276. ),
  277. TP_fast_assign(
  278. __entry->info = le32_to_cpu(ctx->ep_info);
  279. __entry->info2 = le32_to_cpu(ctx->ep_info2);
  280. __entry->deq = le64_to_cpu(ctx->deq);
  281. __entry->tx_info = le32_to_cpu(ctx->tx_info);
  282. ),
  283. TP_printk("%s", xhci_decode_ep_context(__entry->info,
  284. __entry->info2, __entry->deq, __entry->tx_info)
  285. )
  286. );
  287. DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_stop_ep,
  288. TP_PROTO(struct xhci_ep_ctx *ctx),
  289. TP_ARGS(ctx)
  290. );
  291. DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_set_deq_ep,
  292. TP_PROTO(struct xhci_ep_ctx *ctx),
  293. TP_ARGS(ctx)
  294. );
  295. DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_reset_ep,
  296. TP_PROTO(struct xhci_ep_ctx *ctx),
  297. TP_ARGS(ctx)
  298. );
  299. DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_config_ep,
  300. TP_PROTO(struct xhci_ep_ctx *ctx),
  301. TP_ARGS(ctx)
  302. );
  303. DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
  304. TP_PROTO(struct xhci_slot_ctx *ctx),
  305. TP_ARGS(ctx),
  306. TP_STRUCT__entry(
  307. __field(u32, info)
  308. __field(u32, info2)
  309. __field(u32, tt_info)
  310. __field(u32, state)
  311. ),
  312. TP_fast_assign(
  313. __entry->info = le32_to_cpu(ctx->dev_info);
  314. __entry->info2 = le32_to_cpu(ctx->dev_info2);
  315. __entry->tt_info = le64_to_cpu(ctx->tt_info);
  316. __entry->state = le32_to_cpu(ctx->dev_state);
  317. ),
  318. TP_printk("%s", xhci_decode_slot_context(__entry->info,
  319. __entry->info2, __entry->tt_info,
  320. __entry->state)
  321. )
  322. );
  323. DEFINE_EVENT(xhci_log_slot_ctx, xhci_alloc_dev,
  324. TP_PROTO(struct xhci_slot_ctx *ctx),
  325. TP_ARGS(ctx)
  326. );
  327. DEFINE_EVENT(xhci_log_slot_ctx, xhci_free_dev,
  328. TP_PROTO(struct xhci_slot_ctx *ctx),
  329. TP_ARGS(ctx)
  330. );
  331. DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_disable_slot,
  332. TP_PROTO(struct xhci_slot_ctx *ctx),
  333. TP_ARGS(ctx)
  334. );
  335. DEFINE_EVENT(xhci_log_slot_ctx, xhci_discover_or_reset_device,
  336. TP_PROTO(struct xhci_slot_ctx *ctx),
  337. TP_ARGS(ctx)
  338. );
  339. DEFINE_EVENT(xhci_log_slot_ctx, xhci_setup_device_slot,
  340. TP_PROTO(struct xhci_slot_ctx *ctx),
  341. TP_ARGS(ctx)
  342. );
  343. DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_addr_dev,
  344. TP_PROTO(struct xhci_slot_ctx *ctx),
  345. TP_ARGS(ctx)
  346. );
  347. DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_reset_dev,
  348. TP_PROTO(struct xhci_slot_ctx *ctx),
  349. TP_ARGS(ctx)
  350. );
  351. DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
  352. TP_PROTO(struct xhci_slot_ctx *ctx),
  353. TP_ARGS(ctx)
  354. );
  355. DEFINE_EVENT(xhci_log_slot_ctx, xhci_configure_endpoint,
  356. TP_PROTO(struct xhci_slot_ctx *ctx),
  357. TP_ARGS(ctx)
  358. );
  359. DECLARE_EVENT_CLASS(xhci_log_ring,
  360. TP_PROTO(struct xhci_ring *ring),
  361. TP_ARGS(ring),
  362. TP_STRUCT__entry(
  363. __field(u32, type)
  364. __field(void *, ring)
  365. __field(dma_addr_t, enq)
  366. __field(dma_addr_t, deq)
  367. __field(dma_addr_t, enq_seg)
  368. __field(dma_addr_t, deq_seg)
  369. __field(unsigned int, num_segs)
  370. __field(unsigned int, stream_id)
  371. __field(unsigned int, cycle_state)
  372. __field(unsigned int, num_trbs_free)
  373. __field(unsigned int, bounce_buf_len)
  374. ),
  375. TP_fast_assign(
  376. __entry->ring = ring;
  377. __entry->type = ring->type;
  378. __entry->num_segs = ring->num_segs;
  379. __entry->stream_id = ring->stream_id;
  380. __entry->enq_seg = ring->enq_seg->dma;
  381. __entry->deq_seg = ring->deq_seg->dma;
  382. __entry->cycle_state = ring->cycle_state;
  383. __entry->num_trbs_free = ring->num_trbs_free;
  384. __entry->bounce_buf_len = ring->bounce_buf_len;
  385. __entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
  386. __entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
  387. ),
  388. TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d free_trbs %d bounce %d cycle %d",
  389. xhci_ring_type_string(__entry->type), __entry->ring,
  390. &__entry->enq, &__entry->enq_seg,
  391. &__entry->deq, &__entry->deq_seg,
  392. __entry->num_segs,
  393. __entry->stream_id,
  394. __entry->num_trbs_free,
  395. __entry->bounce_buf_len,
  396. __entry->cycle_state
  397. )
  398. );
  399. DEFINE_EVENT(xhci_log_ring, xhci_ring_alloc,
  400. TP_PROTO(struct xhci_ring *ring),
  401. TP_ARGS(ring)
  402. );
  403. DEFINE_EVENT(xhci_log_ring, xhci_ring_free,
  404. TP_PROTO(struct xhci_ring *ring),
  405. TP_ARGS(ring)
  406. );
  407. DEFINE_EVENT(xhci_log_ring, xhci_ring_expansion,
  408. TP_PROTO(struct xhci_ring *ring),
  409. TP_ARGS(ring)
  410. );
  411. DEFINE_EVENT(xhci_log_ring, xhci_inc_enq,
  412. TP_PROTO(struct xhci_ring *ring),
  413. TP_ARGS(ring)
  414. );
  415. DEFINE_EVENT(xhci_log_ring, xhci_inc_deq,
  416. TP_PROTO(struct xhci_ring *ring),
  417. TP_ARGS(ring)
  418. );
  419. DECLARE_EVENT_CLASS(xhci_log_portsc,
  420. TP_PROTO(u32 portnum, u32 portsc),
  421. TP_ARGS(portnum, portsc),
  422. TP_STRUCT__entry(
  423. __field(u32, portnum)
  424. __field(u32, portsc)
  425. ),
  426. TP_fast_assign(
  427. __entry->portnum = portnum;
  428. __entry->portsc = portsc;
  429. ),
  430. TP_printk("port-%d: %s",
  431. __entry->portnum,
  432. xhci_decode_portsc(__entry->portsc)
  433. )
  434. );
  435. DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status,
  436. TP_PROTO(u32 portnum, u32 portsc),
  437. TP_ARGS(portnum, portsc)
  438. );
  439. DEFINE_EVENT(xhci_log_portsc, xhci_get_port_status,
  440. TP_PROTO(u32 portnum, u32 portsc),
  441. TP_ARGS(portnum, portsc)
  442. );
  443. DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
  444. TP_PROTO(u32 portnum, u32 portsc),
  445. TP_ARGS(portnum, portsc)
  446. );
  447. DECLARE_EVENT_CLASS(xhci_dbc_log_request,
  448. TP_PROTO(struct dbc_request *req),
  449. TP_ARGS(req),
  450. TP_STRUCT__entry(
  451. __field(struct dbc_request *, req)
  452. __field(bool, dir)
  453. __field(unsigned int, actual)
  454. __field(unsigned int, length)
  455. __field(int, status)
  456. ),
  457. TP_fast_assign(
  458. __entry->req = req;
  459. __entry->dir = req->direction;
  460. __entry->actual = req->actual;
  461. __entry->length = req->length;
  462. __entry->status = req->status;
  463. ),
  464. TP_printk("%s: req %p length %u/%u ==> %d",
  465. __entry->dir ? "bulk-in" : "bulk-out",
  466. __entry->req, __entry->actual,
  467. __entry->length, __entry->status
  468. )
  469. );
  470. DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_alloc_request,
  471. TP_PROTO(struct dbc_request *req),
  472. TP_ARGS(req)
  473. );
  474. DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_free_request,
  475. TP_PROTO(struct dbc_request *req),
  476. TP_ARGS(req)
  477. );
  478. DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_queue_request,
  479. TP_PROTO(struct dbc_request *req),
  480. TP_ARGS(req)
  481. );
  482. DEFINE_EVENT(xhci_dbc_log_request, xhci_dbc_giveback_request,
  483. TP_PROTO(struct dbc_request *req),
  484. TP_ARGS(req)
  485. );
  486. #endif /* __XHCI_TRACE_H */
  487. /* this part must be outside header guard */
  488. #undef TRACE_INCLUDE_PATH
  489. #define TRACE_INCLUDE_PATH .
  490. #undef TRACE_INCLUDE_FILE
  491. #define TRACE_INCLUDE_FILE xhci-trace
  492. #include <trace/define_trace.h>