/* xhci-trace.h */
  1. /*
  2. * xHCI host controller driver
  3. *
  4. * Copyright (C) 2013 Xenia Ragiadakou
  5. *
  6. * Author: Xenia Ragiadakou
  7. * Email : burzalodowa@gmail.com
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #undef TRACE_SYSTEM
  14. #define TRACE_SYSTEM xhci-hcd
  15. /*
  16. * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
  17. * legitimate C variable. It is not exported to user space.
  18. */
  19. #undef TRACE_SYSTEM_VAR
  20. #define TRACE_SYSTEM_VAR xhci_hcd
  21. #if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
  22. #define __XHCI_TRACE_H
  23. #include <linux/tracepoint.h>
  24. #include "xhci.h"
  25. #define XHCI_MSG_MAX 500
/*
 * Event class for free-form debug messages: the printf-style format and
 * va_list carried in *vaf are rendered into a fixed-capacity string that
 * is stored directly in the trace ring buffer.
 */
DECLARE_EVENT_CLASS(xhci_log_msg,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
	TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
	TP_fast_assign(
		/* Format at record time; output is truncated to XHCI_MSG_MAX. */
		vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
	),
	TP_printk("%s", __get_str(msg))
);
/*
 * Per-topic debug-message events, all sharing the xhci_log_msg class.
 * Each gives its debug category (addressing, context changes, quirks,
 * endpoint reset, URB cancel, init, ring expansion) its own tracepoint
 * so categories can be enabled individually.
 */
DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf)
);
/*
 * Event class for snapshotting a container context (device or input
 * context): records its DMA/virtual addresses, type, and a raw copy of
 * the context bytes themselves.
 *
 * The dynamic array is sized in u32 words: each context entry is 8 words
 * (32 bytes), doubled when the controller uses 64-byte contexts
 * (HCC_64BYTE_CONTEXT), times the number of entries copied — the slot
 * context, the input control context (input contexts only), and
 * ep_num + 1 endpoint contexts.
 */
DECLARE_EVENT_CLASS(xhci_log_ctx,
	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
		 unsigned int ep_num),
	TP_ARGS(xhci, ctx, ep_num),
	TP_STRUCT__entry(
		__field(int, ctx_64)
		__field(unsigned, ctx_type)
		__field(dma_addr_t, ctx_dma)
		__field(u8 *, ctx_va)
		__field(unsigned, ctx_ep_num)
		__field(int, slot_id)
		__dynamic_array(u32, ctx_data,
			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
	),
	TP_fast_assign(
		struct usb_device *udev;
		/*
		 * NOTE(review): self.controller is the HCD's controller
		 * device, which is not normally a struct usb_device;
		 * to_usb_device() here looks suspect — verify before
		 * trusting slot_id from this event.
		 */
		udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
		__entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
		__entry->ctx_type = ctx->type;
		__entry->ctx_dma = ctx->dma;
		__entry->ctx_va = ctx->bytes;
		__entry->slot_id = udev->slot_id;
		__entry->ctx_ep_num = ep_num;
		/* Copy size in bytes: 32 (or 64) bytes per context entry. */
		memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
	),
	TP_printk("ctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
		__entry->ctx_64, __entry->ctx_type,
		(unsigned long long) __entry->ctx_dma, __entry->ctx_va
	)
);
/* Context dump taken around address-device operations. */
DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
		 unsigned int ep_num),
	TP_ARGS(xhci, ctx, ep_num)
);
/*
 * Event class for a single TRB: stores the ring type plus the four raw
 * 32-bit TRB words (CPU byte order). Decoding into human-readable form
 * is deferred to TP_printk, i.e. it happens when the trace is read,
 * not when the event is recorded.
 */
DECLARE_EVENT_CLASS(xhci_log_trb,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb),
	TP_STRUCT__entry(
		__field(u32, type)
		__field(u32, field0)
		__field(u32, field1)
		__field(u32, field2)
		__field(u32, field3)
	),
	TP_fast_assign(
		__entry->type = ring->type;
		__entry->field0 = le32_to_cpu(trb->field[0]);
		__entry->field1 = le32_to_cpu(trb->field[1]);
		__entry->field2 = le32_to_cpu(trb->field[2]);
		__entry->field3 = le32_to_cpu(trb->field[3]);
	),
	TP_printk("%s: %s", xhci_ring_type_string(__entry->type),
		xhci_decode_trb(__entry->field0, __entry->field1,
				__entry->field2, __entry->field3)
	)
);
/*
 * TRB events sharing the xhci_log_trb class: event-ring TRBs as they
 * are handled (event/command/transfer completions) and TRBs as they
 * are queued on a ring.
 */
DEFINE_EVENT(xhci_log_trb, xhci_handle_event,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_handle_command,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);

DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
	TP_ARGS(ring, trb)
);
/*
 * Event class for a virtual device: records the vdev pointer, the DMA
 * addresses of its input/output contexts, and a summary of the attached
 * usb_device (devnum, state, speed, port, topology level, slot id).
 * Note: vdev->udev is dereferenced unconditionally, so callers must
 * only emit these events while udev is valid.
 */
DECLARE_EVENT_CLASS(xhci_log_virt_dev,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev),
	TP_STRUCT__entry(
		__field(void *, vdev)
		__field(unsigned long long, out_ctx)
		__field(unsigned long long, in_ctx)
		__field(int, devnum)
		__field(int, state)
		__field(int, speed)
		__field(u8, portnum)
		__field(u8, level)
		__field(int, slot_id)
	),
	TP_fast_assign(
		__entry->vdev = vdev;
		__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
		__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
		__entry->devnum = vdev->udev->devnum;
		__entry->state = vdev->udev->state;
		__entry->speed = vdev->udev->speed;
		__entry->portnum = vdev->udev->portnum;
		__entry->level = vdev->udev->level;
		__entry->slot_id = vdev->udev->slot_id;
	),
	TP_printk("vdev %p ctx %llx | %llx num %d state %d speed %d port %d level %d slot %d",
		__entry->vdev, __entry->in_ctx, __entry->out_ctx,
		__entry->devnum, __entry->state, __entry->speed,
		__entry->portnum, __entry->level, __entry->slot_id
	)
);
/*
 * Virtual-device lifecycle events sharing xhci_log_virt_dev:
 * allocation, free, device setup/addressing, and stop.
 */
DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_addressable_virt_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);

DEFINE_EVENT(xhci_log_virt_dev, xhci_stop_device,
	TP_PROTO(struct xhci_virt_device *vdev),
	TP_ARGS(vdev)
);
/*
 * Event class for URB lifecycle tracing: records addressing (endpoint
 * number/direction/type, pipe, stream, slot), transfer progress
 * (actual vs. requested length, scatter-gather counts), and
 * status/transfer flags for a single URB.
 */
DECLARE_EVENT_CLASS(xhci_log_urb,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb),
	TP_STRUCT__entry(
		__field(void *, urb)
		__field(unsigned int, pipe)
		__field(unsigned int, stream)
		__field(int, status)
		__field(unsigned int, flags)
		__field(int, num_mapped_sgs)
		__field(int, num_sgs)
		__field(int, length)
		__field(int, actual)
		__field(int, epnum)
		__field(int, dir_in)
		__field(int, type)
		__field(int, slot_id)
	),
	TP_fast_assign(
		__entry->urb = urb;
		__entry->pipe = urb->pipe;
		__entry->stream = urb->stream_id;
		__entry->status = urb->status;
		__entry->flags = urb->transfer_flags;
		__entry->num_mapped_sgs = urb->num_mapped_sgs;
		__entry->num_sgs = urb->num_sgs;
		__entry->length = urb->transfer_buffer_length;
		__entry->actual = urb->actual_length;
		/* Endpoint identity taken from the host-side endpoint descriptor */
		__entry->epnum = usb_endpoint_num(&urb->ep->desc);
		__entry->dir_in = usb_endpoint_dir_in(&urb->ep->desc);
		__entry->type = usb_endpoint_type(&urb->ep->desc);
		__entry->slot_id = urb->dev->slot_id;
	),
	TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
		__entry->epnum, __entry->dir_in ? "in" : "out",
		/* Statement expression mapping the transfer type to a name */
		({ char *s;
		switch (__entry->type) {
		case USB_ENDPOINT_XFER_INT:
			s = "intr";
			break;
		case USB_ENDPOINT_XFER_CONTROL:
			s = "control";
			break;
		case USB_ENDPOINT_XFER_BULK:
			s = "bulk";
			break;
		case USB_ENDPOINT_XFER_ISOC:
			s = "isoc";
			break;
		default:
			s = "UNKNOWN";
		} s; }), __entry->urb, __entry->pipe, __entry->slot_id,
		__entry->actual, __entry->length, __entry->num_mapped_sgs,
		__entry->num_sgs, __entry->stream, __entry->flags
	)
);
/*
 * URB lifecycle events sharing xhci_log_urb: submission, completion
 * (giveback) and cancellation (dequeue).
 */
DEFINE_EVENT(xhci_log_urb, xhci_urb_enqueue,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);

DEFINE_EVENT(xhci_log_urb, xhci_urb_giveback,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);

DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue,
	TP_PROTO(struct urb *urb),
	TP_ARGS(urb)
);
/*
 * Event class for dumping an xHCI endpoint context: captures the raw
 * context words (CPU byte order); xhci_decode_ep_context() renders
 * them when the trace is read.
 */
DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(u32, info)
		__field(u32, info2)
		__field(u64, deq)
		__field(u32, tx_info)
	),
	TP_fast_assign(
		__entry->info = le32_to_cpu(ctx->ep_info);
		__entry->info2 = le32_to_cpu(ctx->ep_info2);
		/* deq is the 64-bit TR dequeue pointer field */
		__entry->deq = le64_to_cpu(ctx->deq);
		__entry->tx_info = le32_to_cpu(ctx->tx_info);
	),
	TP_printk("%s", xhci_decode_ep_context(__entry->info,
		__entry->info2, __entry->deq, __entry->tx_info)
	)
);
/*
 * Endpoint-context dumps taken while handling command completions
 * (stop endpoint, set TR dequeue, reset endpoint, configure endpoint).
 */
DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_stop_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_set_deq_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_reset_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_config_ep,
	TP_PROTO(struct xhci_ep_ctx *ctx),
	TP_ARGS(ctx)
);
  293. DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
  294. TP_PROTO(struct xhci_slot_ctx *ctx),
  295. TP_ARGS(ctx),
  296. TP_STRUCT__entry(
  297. __field(u32, info)
  298. __field(u32, info2)
  299. __field(u32, tt_info)
  300. __field(u32, state)
  301. ),
  302. TP_fast_assign(
  303. __entry->info = le32_to_cpu(ctx->dev_info);
  304. __entry->info2 = le32_to_cpu(ctx->dev_info2);
  305. __entry->tt_info = le64_to_cpu(ctx->tt_info);
  306. __entry->state = le32_to_cpu(ctx->dev_state);
  307. ),
  308. TP_printk("%s", xhci_decode_slot_context(__entry->info,
  309. __entry->info2, __entry->tt_info,
  310. __entry->state)
  311. )
  312. );
/*
 * Slot-context dumps sharing xhci_log_slot_ctx, taken at slot
 * allocation/free, device reset/setup, and while handling address,
 * disable-slot, reset-device and set-dequeue command completions.
 */
DEFINE_EVENT(xhci_log_slot_ctx, xhci_alloc_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_free_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_disable_slot,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_discover_or_reset_device,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_setup_device_slot,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_addr_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_reset_dev,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
	TP_PROTO(struct xhci_slot_ctx *ctx),
	TP_ARGS(ctx)
);
/*
 * Event class for ring state: records the ring's identity and geometry
 * (type, segment count, stream id) plus its current enqueue/dequeue
 * positions as DMA addresses, along with cycle state, free-TRB count
 * and bounce buffer length.
 */
DECLARE_EVENT_CLASS(xhci_log_ring,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring),
	TP_STRUCT__entry(
		__field(u32, type)
		__field(void *, ring)
		__field(dma_addr_t, enq)
		__field(dma_addr_t, deq)
		__field(dma_addr_t, enq_seg)
		__field(dma_addr_t, deq_seg)
		__field(unsigned int, num_segs)
		__field(unsigned int, stream_id)
		__field(unsigned int, cycle_state)
		__field(unsigned int, num_trbs_free)
		__field(unsigned int, bounce_buf_len)
	),
	TP_fast_assign(
		__entry->ring = ring;
		__entry->type = ring->type;
		__entry->num_segs = ring->num_segs;
		__entry->stream_id = ring->stream_id;
		__entry->enq_seg = ring->enq_seg->dma;
		__entry->deq_seg = ring->deq_seg->dma;
		__entry->cycle_state = ring->cycle_state;
		__entry->num_trbs_free = ring->num_trbs_free;
		__entry->bounce_buf_len = ring->bounce_buf_len;
		/* Translate the virtual enqueue/dequeue TRBs to DMA addresses */
		__entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
		__entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
	),
	TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d free_trbs %d bounce %d cycle %d",
		xhci_ring_type_string(__entry->type), __entry->ring,
		&__entry->enq, &__entry->enq_seg,
		&__entry->deq, &__entry->deq_seg,
		__entry->num_segs,
		__entry->stream_id,
		__entry->num_trbs_free,
		__entry->bounce_buf_len,
		__entry->cycle_state
	)
);
/*
 * Ring events sharing xhci_log_ring: allocation, free, expansion,
 * and enqueue/dequeue pointer advancement.
 */
DEFINE_EVENT(xhci_log_ring, xhci_ring_alloc,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_ring_free,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_ring_expansion,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_inc_enq,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);

DEFINE_EVENT(xhci_log_ring, xhci_inc_deq,
	TP_PROTO(struct xhci_ring *ring),
	TP_ARGS(ring)
);
/*
 * Event class for a port status register: stores the port number and
 * the raw PORTSC value; xhci_decode_portsc() renders the bits when
 * the trace is read.
 */
DECLARE_EVENT_CLASS(xhci_log_portsc,
	TP_PROTO(u32 portnum, u32 portsc),
	TP_ARGS(portnum, portsc),
	TP_STRUCT__entry(
		__field(u32, portnum)
		__field(u32, portsc)
	),
	TP_fast_assign(
		__entry->portnum = portnum;
		__entry->portsc = portsc;
	),
	TP_printk("port-%d: %s",
		__entry->portnum,
		xhci_decode_portsc(__entry->portsc)
	)
);
/* PORTSC snapshot taken while handling a port status change event. */
DEFINE_EVENT(xhci_log_portsc, xhci_handle_port_status,
	TP_PROTO(u32 portnum, u32 portsc),
	TP_ARGS(portnum, portsc)
);
  425. #endif /* __XHCI_TRACE_H */
  426. /* this part must be outside header guard */
  427. #undef TRACE_INCLUDE_PATH
  428. #define TRACE_INCLUDE_PATH .
  429. #undef TRACE_INCLUDE_FILE
  430. #define TRACE_INCLUDE_FILE xhci-trace
  431. #include <trace/define_trace.h>