/**
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"
static inline void *
dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
                       dma_addr_t *dma_handle, gfp_t flags)
{
        void *vaddr;

        vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
                                   size, dma_handle, flags);
        if (!vaddr)
                return NULL;

        memset(vaddr, 0, size);
        return vaddr;
}

static inline void
dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
                      void *cpu_addr, dma_addr_t dma_handle)
{
        if (cpu_addr)
                dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
                                  size, cpu_addr, dma_handle);
}
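
/*
 * Fill in the DbC string descriptors (serial, product, manufacturer and the
 * string0/LANGID descriptor) and return their four lengths packed into one
 * 32-bit value (string0 in byte 0, serial in byte 3), which
 * xhci_dbc_init_contexts() writes into the Info Context length field.
 */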
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
        struct usb_string_descriptor    *s_desc;
        u32                             string_length;

        /* Serial string: */
        s_desc = (struct usb_string_descriptor *)strings->serial;
        utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_SERIAL) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           = s_desc->bLength;
        string_length           <<= 8;

        /* Product string: */
        s_desc = (struct usb_string_descriptor *)strings->product;
        utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           += s_desc->bLength;
        string_length           <<= 8;

        /* Manufacturer string: */
        s_desc = (struct usb_string_descriptor *)strings->manufacturer;
        utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
                        strlen(DBC_STRING_MANUFACTURER),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           += s_desc->bLength;
        string_length           <<= 8;

        /* String0: */
        strings->string0[0]     = 4;
        strings->string0[1]     = USB_DT_STRING;
        strings->string0[2]     = 0x09;
        strings->string0[3]     = 0x04;
        string_length           += 4;

        return string_length;
}
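
/*
 * Program the DbC Info Context with the string descriptor DMA addresses and
 * lengths, set up the bulk IN/OUT endpoint contexts (1024-byte max packet,
 * burst size taken from the control register), and write the context and
 * device-info registers.
 */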
static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
{
        struct xhci_dbc         *dbc;
        struct dbc_info_context *info;
        struct xhci_ep_ctx      *ep_ctx;
        u32                     dev_info;
        dma_addr_t              deq, dma;
        unsigned int            max_burst;

        dbc = xhci->dbc;
        if (!dbc)
                return;

        /* Populate info Context: */
        info                    = (struct dbc_info_context *)dbc->ctx->bytes;
        dma                     = dbc->string_dma;
        info->string0           = cpu_to_le64(dma);
        info->manufacturer      = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
        info->product           = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
        info->serial            = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
        info->length            = cpu_to_le32(string_length);

        /* Populate bulk out endpoint context: */
        ep_ctx                  = dbc_bulkout_ctx(dbc);
        max_burst               = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
        deq                     = dbc_bulkout_enq(dbc);
        ep_ctx->ep_info         = 0;
        ep_ctx->ep_info2        = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
        ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_out->cycle_state);

        /* Populate bulk in endpoint context: */
        ep_ctx                  = dbc_bulkin_ctx(dbc);
        deq                     = dbc_bulkin_enq(dbc);
        ep_ctx->ep_info         = 0;
        ep_ctx->ep_info2        = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
        ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_in->cycle_state);

        /* Set DbC context and info registers: */
        xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);

        /* writel() takes a CPU-order value and does the LE conversion itself. */
        dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
        writel(dev_info, &dbc->regs->devinfo1);

        dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
        writel(dev_info, &dbc->regs->devinfo2);
}
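
/*
 * Complete a transfer request: unlink it from the pending list, unmap its
 * buffer, and invoke its ->complete() callback with dbc->lock dropped so the
 * callback may requeue requests.
 */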
static void xhci_dbc_giveback(struct dbc_request *req, int status)
        __releases(&dbc->lock)
        __acquires(&dbc->lock)
{
        struct dbc_ep           *dep = req->dep;
        struct xhci_dbc         *dbc = dep->dbc;
        struct xhci_hcd         *xhci = dbc->xhci;
        struct device           *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

        list_del_init(&req->list_pending);
        req->trb_dma = 0;
        req->trb = NULL;

        if (req->status == -EINPROGRESS)
                req->status = status;

        trace_xhci_dbc_giveback_request(req);

        dma_unmap_single(dev,
                         req->dma,
                         req->length,
                         dbc_ep_dma_direction(dep));

        /* Give back the transfer request: */
        spin_unlock(&dbc->lock);
        req->complete(xhci, req);
        spin_lock(&dbc->lock);
}
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
        union xhci_trb  *trb = req->trb;

        trb->generic.field[0]   = 0;
        trb->generic.field[1]   = 0;
        trb->generic.field[2]   = 0;
        trb->generic.field[3]   &= cpu_to_le32(TRB_CYCLE);
        trb->generic.field[3]   |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

        xhci_dbc_giveback(req, -ESHUTDOWN);
}
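
/*
 * Cancel every request still queued on an endpoint: turn each pending TRB
 * into a No-Op (preserving its cycle bit) and give the request back with
 * -ESHUTDOWN.
 */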
static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
        struct dbc_request      *req, *tmp;

        list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
                xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}
struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
        struct dbc_request      *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        req->dep = dep;
        INIT_LIST_HEAD(&req->list_pending);
        INIT_LIST_HEAD(&req->list_pool);
        req->direction = dep->direction;

        trace_xhci_dbc_alloc_request(req);

        return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
        trace_xhci_dbc_free_request(req);
        kfree(req);
}
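
/*
 * Write one TRB at the ring's enqueue pointer and advance it.  When the
 * enqueue pointer lands on the link TRB, toggle the link TRB's cycle bit,
 * wrap back to the start of the segment and flip the ring's cycle state.
 */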
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
                   u32 field2, u32 field3, u32 field4)
{
        union xhci_trb          *trb, *next;

        trb = ring->enqueue;
        trb->generic.field[0]   = cpu_to_le32(field1);
        trb->generic.field[1]   = cpu_to_le32(field2);
        trb->generic.field[2]   = cpu_to_le32(field3);
        trb->generic.field[3]   = cpu_to_le32(field4);

        trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

        ring->num_trbs_free--;
        next = ++(ring->enqueue);
        if (TRB_TYPE_LINK_LE32(next->link.control)) {
                next->link.control ^= cpu_to_le32(TRB_CYCLE);
                ring->enqueue = ring->enq_seg->trbs;
                ring->cycle_state ^= 1;
        }
}
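
/*
 * Queue a single Normal TRB for the request and ring the doorbell.  The TRB
 * is first written with its cycle bit inverted (owned by software) and only
 * handed to the controller, by flipping the cycle bit to the ring's current
 * cycle state, after a write barrier, so the hardware never sees a
 * half-written TRB.
 */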
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
                                  struct dbc_request *req)
{
        u64                     addr;
        union xhci_trb          *trb;
        unsigned int            num_trbs;
        struct xhci_dbc         *dbc = dep->dbc;
        struct xhci_ring        *ring = dep->ring;
        u32                     length, control, cycle;

        num_trbs = count_trbs(req->dma, req->length);
        WARN_ON(num_trbs != 1);
        if (ring->num_trbs_free < num_trbs)
                return -EBUSY;

        addr    = req->dma;
        trb     = ring->enqueue;
        cycle   = ring->cycle_state;
        length  = TRB_LEN(req->length);
        control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

        /*
         * control stays in CPU byte order here; xhci_dbc_queue_trb()
         * converts all fields to little endian when writing the TRB.
         */
        if (cycle)
                control &= ~TRB_CYCLE;
        else
                control |= TRB_CYCLE;

        req->trb = ring->enqueue;
        req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
        xhci_dbc_queue_trb(ring,
                           lower_32_bits(addr),
                           upper_32_bits(addr),
                           length, control);

        /*
         * Add a barrier between writes of trb fields and flipping
         * the cycle bit:
         */
        wmb();
        if (cycle)
                trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
        else
                trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

        writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

        return 0;
}
static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
        int                     ret;
        struct device           *dev;
        struct xhci_dbc         *dbc = dep->dbc;
        struct xhci_hcd         *xhci = dbc->xhci;

        dev = xhci_to_hcd(xhci)->self.sysdev;

        if (!req->length || !req->buf)
                return -EINVAL;

        req->actual             = 0;
        req->status             = -EINPROGRESS;

        req->dma = dma_map_single(dev,
                                  req->buf,
                                  req->length,
                                  dbc_ep_dma_direction(dep));
        if (dma_mapping_error(dev, req->dma)) {
                xhci_err(xhci, "failed to map buffer\n");
                return -EFAULT;
        }

        ret = xhci_dbc_queue_bulk_tx(dep, req);
        if (ret) {
                xhci_err(xhci, "failed to queue trbs\n");
                dma_unmap_single(dev,
                                 req->dma,
                                 req->length,
                                 dbc_ep_dma_direction(dep));
                return -EFAULT;
        }

        list_add_tail(&req->list_pending, &dep->list_pending);

        return 0;
}
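
/*
 * Entry point for DbC users (e.g. the DbC TTY glue) to submit a request.
 * Requests are accepted only while the DbC is in the DS_CONFIGURED state;
 * the event work is kicked afterwards to process completions promptly.
 */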
int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
                 gfp_t gfp_flags)
{
        struct xhci_dbc         *dbc = dep->dbc;
        int                     ret = -ESHUTDOWN;

        spin_lock(&dbc->lock);
        if (dbc->state == DS_CONFIGURED)
                ret = dbc_ep_do_queue(dep, req);
        spin_unlock(&dbc->lock);

        mod_delayed_work(system_wq, &dbc->event_work, 0);

        trace_xhci_dbc_queue_request(req);

        return ret;
}
static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
{
        struct dbc_ep           *dep;
        struct xhci_dbc         *dbc = xhci->dbc;

        dep                     = &dbc->eps[direction];
        dep->dbc                = dbc;
        dep->direction          = direction;
        dep->ring               = direction ? dbc->ring_in : dbc->ring_out;

        INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
{
        xhci_dbc_do_eps_init(xhci, BULK_OUT);
        xhci_dbc_do_eps_init(xhci, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}
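
/*
 * Allocate everything the DbC needs before it can be enabled: the event and
 * bulk transfer rings, the ERST, the context data structure and the string
 * table, then program the event ring registers and the contexts.  On failure
 * the allocations done so far are unwound in reverse order.
 */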
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
        int                     ret;
        dma_addr_t              deq;
        u32                     string_length;
        struct xhci_dbc         *dbc = xhci->dbc;

        /* Allocate various rings for events and transfers: */
        dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
        if (!dbc->ring_evt)
                goto evt_fail;

        dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_in)
                goto in_fail;

        dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_out)
                goto out_fail;

        /* Allocate and populate ERST: */
        ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
        if (ret)
                goto erst_fail;

        /* Allocate context data structure: */
        dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
        if (!dbc->ctx)
                goto ctx_fail;

        /* Allocate the string table: */
        dbc->string_size = sizeof(struct dbc_str_descs);
        dbc->string = dbc_dma_alloc_coherent(xhci,
                                             dbc->string_size,
                                             &dbc->string_dma,
                                             flags);
        if (!dbc->string)
                goto string_fail;

        /* Setup ERST register: */
        writel(dbc->erst.erst_size, &dbc->regs->ersts);
        xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
        deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                   dbc->ring_evt->dequeue);
        xhci_write_64(xhci, deq, &dbc->regs->erdp);

        /* Setup strings and contexts: */
        string_length = xhci_dbc_populate_strings(dbc->string);
        xhci_dbc_init_contexts(xhci, string_length);

        mmiowb();

        xhci_dbc_eps_init(xhci);
        dbc->state = DS_INITIALIZED;

        return 0;

string_fail:
        xhci_free_container_ctx(xhci, dbc->ctx);
        dbc->ctx = NULL;
ctx_fail:
        xhci_free_erst(xhci, &dbc->erst);
erst_fail:
        xhci_ring_free(xhci, dbc->ring_out);
        dbc->ring_out = NULL;
out_fail:
        xhci_ring_free(xhci, dbc->ring_in);
        dbc->ring_in = NULL;
in_fail:
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_evt = NULL;
evt_fail:
        return -ENOMEM;
}
static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return;

        xhci_dbc_eps_exit(xhci);

        if (dbc->string) {
                dbc_dma_free_coherent(xhci,
                                      dbc->string_size,
                                      dbc->string, dbc->string_dma);
                dbc->string = NULL;
        }

        xhci_free_container_ctx(xhci, dbc->ctx);
        dbc->ctx = NULL;

        xhci_free_erst(xhci, &dbc->erst);
        xhci_ring_free(xhci, dbc->ring_out);
        xhci_ring_free(xhci, dbc->ring_in);
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_in = NULL;
        dbc->ring_out = NULL;
        dbc->ring_evt = NULL;
}
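
/*
 * Enable the DbC (called with dbc->lock held, hence GFP_ATOMIC): clear the
 * control register and wait for DBC_CTRL_DBC_ENABLE to drop, allocate the
 * DbC data structures, then set DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE
 * and wait for the controller to acknowledge the enable.
 */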
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
        int                     ret;
        u32                     ctrl;
        struct xhci_dbc         *dbc = xhci->dbc;

        if (dbc->state != DS_DISABLED)
                return -EINVAL;

        writel(0, &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             0, 1000);
        if (ret)
                return ret;

        ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
        if (ret)
                return ret;

        ctrl = readl(&dbc->regs->control);
        writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
               &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             DBC_CTRL_DBC_ENABLE, 1000);
        if (ret)
                return ret;

        dbc->state = DS_ENABLED;

        return 0;
}
static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        if (dbc->state == DS_DISABLED)
                return;

        writel(0, &dbc->regs->control);
        xhci_dbc_mem_cleanup(xhci);
        dbc->state = DS_DISABLED;
}
static int xhci_dbc_start(struct xhci_hcd *xhci)
{
        int                     ret;
        struct xhci_dbc         *dbc = xhci->dbc;

        WARN_ON(!dbc);

        pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

        spin_lock(&dbc->lock);
        ret = xhci_do_dbc_start(xhci);
        spin_unlock(&dbc->lock);

        if (ret) {
                pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
                return ret;
        }

        return mod_delayed_work(system_wq, &dbc->event_work, 1);
}
static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;
        struct dbc_port         *port = &dbc->port;

        WARN_ON(!dbc);

        cancel_delayed_work_sync(&dbc->event_work);

        if (port->registered)
                xhci_dbc_tty_unregister_device(xhci);

        spin_lock(&dbc->lock);
        xhci_do_dbc_stop(xhci);
        spin_unlock(&dbc->lock);

        pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
}
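
/*
 * Log whichever DbC port change bits are set and write them back to clear
 * them, except the port reset change bit, which the DbC state machine in
 * xhci_dbc_do_handle_events() clears itself.
 */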
static void
dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
        u32                     portsc;
        struct xhci_dbc         *dbc = xhci->dbc;

        portsc = readl(&dbc->regs->portsc);
        if (portsc & DBC_PORTSC_CONN_CHANGE)
                xhci_info(xhci, "DbC port connect change\n");

        if (portsc & DBC_PORTSC_RESET_CHANGE)
                xhci_info(xhci, "DbC port reset change\n");

        if (portsc & DBC_PORTSC_LINK_CHANGE)
                xhci_info(xhci, "DbC port link status change\n");

        if (portsc & DBC_PORTSC_CONFIG_CHANGE)
                xhci_info(xhci, "DbC config error change\n");

        /* The port reset change bit is cleared elsewhere: */
        writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
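
/*
 * Handle a Transfer Event on the DbC event ring: translate the completion
 * code into a status, match the event's TRB pointer against the endpoint's
 * pending requests, and give the matching request back together with the
 * number of bytes actually transferred.
 */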
static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
{
        struct dbc_ep           *dep;
        struct xhci_ring        *ring;
        int                     ep_id;
        int                     status;
        u32                     comp_code;
        size_t                  remain_length;
        struct dbc_request      *req = NULL, *r;

        comp_code       = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
        remain_length   = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
        ep_id           = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
        dep             = (ep_id == EPID_OUT) ?
                                get_out_ep(xhci) : get_in_ep(xhci);
        ring            = dep->ring;

        switch (comp_code) {
        case COMP_SUCCESS:
                remain_length = 0;
        /* FALLTHROUGH */
        case COMP_SHORT_PACKET:
                status = 0;
                break;
        case COMP_TRB_ERROR:
        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_STALL_ERROR:
                xhci_warn(xhci, "tx error %d detected\n", comp_code);
                status = -comp_code;
                break;
        default:
                xhci_err(xhci, "unknown tx error %d\n", comp_code);
                status = -comp_code;
                break;
        }

        /* Match the pending request: */
        list_for_each_entry(r, &dep->list_pending, list_pending) {
                if (r->trb_dma == event->trans_event.buffer) {
                        req = r;
                        break;
                }
        }

        if (!req) {
                xhci_warn(xhci, "no matched request\n");
                return;
        }

        trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

        ring->num_trbs_free++;
        req->actual = req->length - remain_length;
        xhci_dbc_giveback(req, status);
}
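
/*
 * Run one pass of the DbC state machine with dbc->lock held and, once the
 * DbC is configured, drain the event ring.  The return value tells the
 * caller whether to register the TTY device (EVT_GSER), unregister it
 * (EVT_DISC), keep polling (EVT_DONE) or stop handling events (EVT_ERR).
 */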
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
        dma_addr_t              deq;
        struct dbc_ep           *dep;
        union xhci_trb          *evt;
        u32                     ctrl, portsc;
        struct xhci_hcd         *xhci = dbc->xhci;
        bool                    update_erdp = false;

        /* DbC state machine: */
        switch (dbc->state) {
        case DS_DISABLED:
        case DS_INITIALIZED:
                return EVT_ERR;
        case DS_ENABLED:
                portsc = readl(&dbc->regs->portsc);
                if (portsc & DBC_PORTSC_CONN_STATUS) {
                        dbc->state = DS_CONNECTED;
                        xhci_info(xhci, "DbC connected\n");
                }

                return EVT_DONE;
        case DS_CONNECTED:
                ctrl = readl(&dbc->regs->control);
                if (ctrl & DBC_CTRL_DBC_RUN) {
                        dbc->state = DS_CONFIGURED;
                        xhci_info(xhci, "DbC configured\n");
                        portsc = readl(&dbc->regs->portsc);
                        writel(portsc, &dbc->regs->portsc);
                        return EVT_GSER;
                }

                return EVT_DONE;
        case DS_CONFIGURED:
                /* Handle cable unplug event: */
                portsc = readl(&dbc->regs->portsc);
                if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
                    !(portsc & DBC_PORTSC_CONN_STATUS)) {
                        xhci_info(xhci, "DbC cable unplugged\n");
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle debug port reset event: */
                if (portsc & DBC_PORTSC_RESET_CHANGE) {
                        xhci_info(xhci, "DbC port reset\n");
                        writel(portsc, &dbc->regs->portsc);
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle endpoint stall event: */
                ctrl = readl(&dbc->regs->control);
                if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
                    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
                        xhci_info(xhci, "DbC Endpoint stall\n");
                        dbc->state = DS_STALLED;

                        if (ctrl & DBC_CTRL_HALT_IN_TR) {
                                dep = get_in_ep(xhci);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        if (ctrl & DBC_CTRL_HALT_OUT_TR) {
                                dep = get_out_ep(xhci);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        return EVT_DONE;
                }

                /* Clear DbC run change bit: */
                if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
                        writel(ctrl, &dbc->regs->control);
                        ctrl = readl(&dbc->regs->control);
                }

                break;
        case DS_STALLED:
                ctrl = readl(&dbc->regs->control);
                if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
                    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
                    (ctrl & DBC_CTRL_DBC_RUN)) {
                        dbc->state = DS_CONFIGURED;
                        break;
                }

                return EVT_DONE;
        default:
                xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
                break;
        }

        /* Handle the events in the event ring: */
        evt = dbc->ring_evt->dequeue;
        while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
                        dbc->ring_evt->cycle_state) {
                /*
                 * Add a barrier between reading the cycle flag and any
                 * reads of the event's flags/data below:
                 */
                rmb();

                trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

                switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
                case TRB_TYPE(TRB_PORT_STATUS):
                        dbc_handle_port_status(xhci, evt);
                        break;
                case TRB_TYPE(TRB_TRANSFER):
                        dbc_handle_xfer_event(xhci, evt);
                        break;
                default:
                        break;
                }

                inc_deq(xhci, dbc->ring_evt);
                evt = dbc->ring_evt->dequeue;
                update_erdp = true;
        }

        /* Update event ring dequeue pointer: */
        if (update_erdp) {
                deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                           dbc->ring_evt->dequeue);
                xhci_write_64(xhci, deq, &dbc->regs->erdp);
        }

        return EVT_DONE;
}
static void xhci_dbc_handle_events(struct work_struct *work)
{
        int                     ret;
        enum evtreturn          evtr;
        struct xhci_dbc         *dbc;
        struct xhci_hcd         *xhci;

        dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
        xhci = dbc->xhci;

        spin_lock(&dbc->lock);
        evtr = xhci_dbc_do_handle_events(dbc);
        spin_unlock(&dbc->lock);

        switch (evtr) {
        case EVT_GSER:
                ret = xhci_dbc_tty_register_device(xhci);
                if (ret) {
                        xhci_err(xhci, "failed to alloc tty device\n");
                        break;
                }

                xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
                break;
        case EVT_DISC:
                xhci_dbc_tty_unregister_device(xhci);
                break;
        case EVT_DONE:
                break;
        default:
                xhci_info(xhci, "stop handling dbc events\n");
                return;
        }

        mod_delayed_work(system_wq, &dbc->event_work, 1);
}
static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
        unsigned long           flags;

        spin_lock_irqsave(&xhci->lock, flags);
        kfree(xhci->dbc);
        xhci->dbc = NULL;
        spin_unlock_irqrestore(&xhci->lock, flags);
}
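
/*
 * Locate the xHCI Debug Capability in the extended capability list and, if
 * it exists and is not already enabled by another DbC user, allocate and
 * attach a struct xhci_dbc to the host controller.
 */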
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
        u32                     reg;
        struct xhci_dbc         *dbc;
        unsigned long           flags;
        void __iomem            *base;
        int                     dbc_cap_offs;

        base = &xhci->cap_regs->hc_capbase;
        dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
        if (!dbc_cap_offs)
                return -ENODEV;

        dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
        if (!dbc)
                return -ENOMEM;

        dbc->regs = base + dbc_cap_offs;

        /* Don't touch the DbC if somebody else has already enabled it. */
        reg = readl(&dbc->regs->control);
        if (reg & DBC_CTRL_DBC_ENABLE) {
                kfree(dbc);
                return -EBUSY;
        }

        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->dbc) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                kfree(dbc);
                return -EBUSY;
        }
        xhci->dbc = dbc;
        spin_unlock_irqrestore(&xhci->lock, flags);

        dbc->xhci = xhci;
        INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
        spin_lock_init(&dbc->lock);

        return 0;
}
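
/*
 * "dbc" sysfs attribute: reading it reports the current DbC state, writing
 * "enable" or "disable" starts or stops the DbC, e.g. (path is illustrative)
 *
 *     echo enable > /sys/bus/pci/devices/<xhci device>/dbc
 */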
static ssize_t dbc_show(struct device *dev,
                        struct device_attribute *attr,
                        char *buf)
{
        const char              *p;
        struct xhci_dbc         *dbc;
        struct xhci_hcd         *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;

        switch (dbc->state) {
        case DS_DISABLED:
                p = "disabled";
                break;
        case DS_INITIALIZED:
                p = "initialized";
                break;
        case DS_ENABLED:
                p = "enabled";
                break;
        case DS_CONNECTED:
                p = "connected";
                break;
        case DS_CONFIGURED:
                p = "configured";
                break;
        case DS_STALLED:
                p = "stalled";
                break;
        default:
                p = "unknown";
        }

        return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
                         struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct xhci_dbc         *dbc;
        struct xhci_hcd         *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;

        if (!strncmp(buf, "enable", 6))
                xhci_dbc_start(xhci);
        else if (!strncmp(buf, "disable", 7))
                xhci_dbc_stop(xhci);
        else
                return -EINVAL;

        return count;
}
static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
        int                     ret;
        struct device           *dev = xhci_to_hcd(xhci)->self.controller;

        ret = xhci_do_dbc_init(xhci);
        if (ret)
                goto init_err3;

        ret = xhci_dbc_tty_register_driver(xhci);
        if (ret)
                goto init_err2;

        ret = device_create_file(dev, &dev_attr_dbc);
        if (ret)
                goto init_err1;

        return 0;

init_err1:
        xhci_dbc_tty_unregister_driver();
init_err2:
        xhci_do_dbc_exit(xhci);
init_err3:
        return ret;
}
void xhci_dbc_exit(struct xhci_hcd *xhci)
{
        struct device           *dev = xhci_to_hcd(xhci)->self.controller;

        if (!xhci->dbc)
                return;

        device_remove_file(dev, &dev_attr_dbc);
        xhci_dbc_tty_unregister_driver();
        xhci_dbc_stop(xhci);
        xhci_do_dbc_exit(xhci);
}
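
/*
 * System suspend/resume hooks: the DbC is torn down on suspend and, if it
 * was configured at that point, started again on resume.
 */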
#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->state == DS_CONFIGURED)
                dbc->resume_required = 1;

        xhci_dbc_stop(xhci);

        return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
        int                     ret = 0;
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->resume_required) {
                dbc->resume_required = 0;
                xhci_dbc_start(xhci);
        }

        return ret;
}
#endif /* CONFIG_PM */