hcd_queue.c

/*
 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the functions to manage Queue Heads and Queue
 * Transfer Descriptors for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

/* Wait this long before releasing periodic reservation */
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
/**
 * dwc2_periodic_channel_available() - Checks that a channel is available for a
 * periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
	/*
	 * Currently assuming that there is a dedicated host channel for
	 * each periodic transaction plus at least one host channel for
	 * non-periodic transactions
	 */
	int status;
	int num_channels;

	num_channels = hsotg->core_params->host_channels;
	if (hsotg->periodic_channels + hsotg->non_periodic_channels <
								num_channels
	    && hsotg->periodic_channels < num_channels - 1) {
		status = 0;
	} else {
		dev_dbg(hsotg->dev,
			"%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
			__func__, num_channels,
			hsotg->periodic_channels, hsotg->non_periodic_channels);
		status = -ENOSPC;
	}

	return status;
}
/**
 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
 * for the specified QH in the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH containing periodic bandwidth required
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For simplicity, this calculation assumes that all the transfers in the
 * periodic schedule may occur in the same (micro)frame
 */
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
					 struct dwc2_qh *qh)
{
	int status;
	s16 max_claimed_usecs;

	status = 0;

	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
		/*
		 * High speed mode
		 * Max periodic usecs is 80% x 125 usec = 100 usec
		 */
		max_claimed_usecs = 100 - qh->host_us;
	} else {
		/*
		 * Full speed mode
		 * Max periodic usecs is 90% x 1000 usec = 900 usec
		 */
		max_claimed_usecs = 900 - qh->host_us;
	}

	if (hsotg->periodic_usecs > max_claimed_usecs) {
		dev_err(hsotg->dev,
			"%s: already claimed usecs %d, required usecs %d\n",
			__func__, hsotg->periodic_usecs, qh->host_us);
		status = -ENOSPC;
	}

	return status;
}
/**
 * Microframe scheduler
 * track the total use in hsotg->frame_usecs
 * keep each qh use in qh->frame_usecs
 * when surrendering the qh then donate the time back
 */
static const unsigned short max_uframe_usecs[] = {
	100, 100, 100, 100, 100, 100, 30, 0
};

void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
{
	int i;

	for (i = 0; i < 8; i++)
		hsotg->frame_usecs[i] = max_uframe_usecs[i];
}
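
/*
 * Find a single (micro)frame with enough free time for this QH's whole
 * transfer, claim that time from hsotg->frame_usecs, and record the claim
 * in qh->frame_usecs. Returns the index of the chosen (micro)frame, or
 * -ENOSPC if no single (micro)frame has enough time left.
 */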
static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->host_us;
	int i;

	for (i = 0; i < 8; i++) {
		/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
		if (utime <= hsotg->frame_usecs[i]) {
			hsotg->frame_usecs[i] -= utime;
			qh->frame_usecs[i] += utime;
			return i;
		}
	}

	return -ENOSPC;
}
/*
 * use this for FS apps that can span multiple uframes
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->host_us;
	unsigned short xtime;
	int t_left;
	int i;
	int j;
	int k;

	for (i = 0; i < 8; i++) {
		if (hsotg->frame_usecs[i] <= 0)
			continue;

		/*
		 * we need n consecutive slots so use j as a start slot
		 * j plus j+1 must be enough time (for now)
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * if we add this frame remaining time to xtime we may
			 * be OK, if not we need to test j for a complete frame
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				if (hsotg->frame_usecs[j] <
							max_uframe_usecs[j])
					continue;
			}
			if (xtime >= utime) {
				t_left = utime;
				for (k = i; k < 8; k++) {
					t_left -= hsotg->frame_usecs[k];
					if (t_left <= 0) {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k]
								+ t_left;
						hsotg->frame_usecs[k] = -t_left;
						return i;
					} else {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k];
						hsotg->frame_usecs[k] = 0;
					}
				}
			}
			/* add the frame time to x time */
			xtime += hsotg->frame_usecs[j];
			/* we must have a fully available next frame or break */
			if (xtime < utime &&
			    hsotg->frame_usecs[j] == max_uframe_usecs[j])
				continue;
		}
	}

	return -ENOSPC;
}
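
/*
 * Pick (micro)frame time for a periodic QH: a single (micro)frame for a
 * high-speed QH, or a run of (micro)frames for a full-speed QH whose
 * transfer may span more than one.
 */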
static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int ret;

	if (qh->dev_speed == USB_SPEED_HIGH) {
		/* if this is a hs transaction we need a full frame */
		ret = dwc2_find_single_uframe(hsotg, qh);
	} else {
		/*
		 * if this is a fs transaction we may need a sequence
		 * of frames
		 */
		ret = dwc2_find_multi_uframe(hsotg, qh);
	}

	return ret;
}
/**
 * dwc2_do_unreserve() - Actually release the periodic reservation
 *
 * This function actually releases the periodic bandwidth that was reserved
 * by the given qh.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer.
 */
static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	assert_spin_locked(&hsotg->lock);

	WARN_ON(!qh->unreserve_pending);

	/* No more unreserve pending--we're doing it */
	qh->unreserve_pending = false;

	if (WARN_ON(!list_empty(&qh->qh_list_entry)))
		list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->host_us;

	if (hsotg->core_params->uframe_sched > 0) {
		int i;

		for (i = 0; i < 8; i++) {
			hsotg->frame_usecs[i] += qh->frame_usecs[i];
			qh->frame_usecs[i] = 0;
		}
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}
/**
 * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
 *
 * According to the kernel doc for usb_submit_urb() (specifically the part about
 * "Reserved Bandwidth Transfers"), we need to keep a reservation active as
 * long as a device driver keeps submitting. Since we're using HCD_BH to give
 * back the URB we need to give the driver a little bit of time before we
 * release the reservation. This timer function is called after the
 * appropriate delay.
 *
 * @data: The QH to unreserve, cast to an unsigned long.
 */
static void dwc2_unreserve_timer_fn(unsigned long data)
{
	struct dwc2_qh *qh = (struct dwc2_qh *)data;
	struct dwc2_hsotg *hsotg = qh->hsotg;
	unsigned long flags;

	/*
	 * Wait for the lock, or for us to be scheduled again. We
	 * could be scheduled again if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 * - The timer has been kicked again.
	 * In that case cancel and wait for the next call.
	 */
	while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
		if (timer_pending(&qh->unreserve_timer))
			return;
	}

	/*
	 * Might be no more unreserve pending if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 *
	 * We can't put this in the loop above because unreserve_pending needs
	 * to be accessed under lock, so we can only check it once we got the
	 * lock.
	 */
	if (qh->unreserve_pending)
		dwc2_do_unreserve(hsotg, qh);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
/**
 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
 * host channel is large enough to handle the maximum data transfer in a single
 * (micro)frame for a periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for a periodic endpoint
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh)
{
	u32 max_xfer_size;
	u32 max_channel_xfer_size;
	int status = 0;

	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
	max_channel_xfer_size = hsotg->core_params->max_transfer_size;

	if (max_xfer_size > max_channel_xfer_size) {
		dev_err(hsotg->dev,
			"%s: Periodic xfer length %d > max xfer length for channel %d\n",
			__func__, max_xfer_size, max_channel_xfer_size);
		status = -ENOSPC;
	}

	return status;
}
/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer. The QH should already contain the
 *         scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	/* Cancel pending unreserve; if canceled OK, unreserve was pending */
	if (del_timer(&qh->unreserve_timer))
		WARN_ON(!qh->unreserve_pending);

	/*
	 * Only need to reserve if there's not an unreserve pending, since if an
	 * unreserve is pending then by definition our old reservation is still
	 * valid. Unreserve might still be pending even if we didn't cancel if
	 * dwc2_unreserve_timer_fn() already started. Code in the timer handles
	 * that case.
	 */
	if (!qh->unreserve_pending) {
		if (hsotg->core_params->uframe_sched > 0) {
			int frame = -1;

			status = dwc2_find_uframe(hsotg, qh);
			if (status == 0)
				frame = 7;
			else if (status > 0)
				frame = status - 1;

			/* Set the new frame up */
			if (frame >= 0) {
				qh->next_active_frame &= ~0x7;
				qh->next_active_frame |= (frame & 7);
				dwc2_sch_dbg(hsotg,
					     "QH=%p sched_p nxt=%04x, uf=%d\n",
					     qh, qh->next_active_frame, frame);
			}

			if (status > 0)
				status = 0;
		} else {
			status = dwc2_periodic_channel_available(hsotg);
			if (status) {
				dev_info(hsotg->dev,
					 "%s: No host channel available for periodic transfer\n",
					 __func__);
				return status;
			}

			status = dwc2_check_periodic_bandwidth(hsotg, qh);
		}

		if (status) {
			dev_dbg(hsotg->dev,
				"%s: Insufficient periodic bandwidth for periodic transfer\n",
				__func__);
			return status;
		}

		if (hsotg->core_params->uframe_sched <= 0)
			/* Reserve periodic channel */
			hsotg->periodic_channels++;

		/* Update claimed usecs per (micro)frame */
		hsotg->periodic_usecs += qh->host_us;
	}

	qh->unreserve_pending = 0;

	if (hsotg->core_params->dma_desc_enable > 0)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	return status;
}
/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	bool did_modify;

	assert_spin_locked(&hsotg->lock);

	/*
	 * Schedule the unreserve to happen in a little bit. Cases here:
	 * - Unreserve worker might be sitting there waiting to grab the lock.
	 *   In this case it will notice it's been scheduled again and will
	 *   quit.
	 * - Unreserve worker might not be scheduled.
	 *
	 * We should never already be scheduled since dwc2_schedule_periodic()
	 * should have canceled the scheduled unreserve timer (hence the
	 * warning on did_modify).
	 *
	 * We add + 1 to the timer to guarantee that at least 1 jiffy has
	 * passed (otherwise the jiffy counter might tick right after we
	 * read it and we'd get no delay).
	 */
	did_modify = mod_timer(&qh->unreserve_timer,
			       jiffies + DWC2_UNRESERVE_DELAY + 1);
	WARN_ON(did_modify);
	qh->unreserve_pending = 1;

	list_del_init(&qh->qh_list_entry);
}
#define SCHEDULE_SLOP 10

/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 * @urb:   Holds the information about the device/endpoint needed to initialize
 *         the QH
 */
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;
	char *speed, *type;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->hsotg = hsotg;
	setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
		    (unsigned long)qh);
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

		qh->host_us = NS_TO_US(usb_calc_bus_time(qh->do_split ?
			      USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
			      qh->ep_type == USB_ENDPOINT_XFER_ISOC,
			      bytecount));

		/* Ensure frame_number corresponds to the reality */
		hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
		/* Start in a slightly future (micro)frame */
		qh->next_active_frame = dwc2_frame_num_inc(hsotg->frame_number,
							   SCHEDULE_SLOP);
		qh->host_interval = urb->interval;
		dwc2_sch_dbg(hsotg, "QH=%p init nxt=%04x, fn=%04x, int=%#x\n",
			     qh, qh->next_active_frame, hsotg->frame_number,
			     qh->host_interval);
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->host_interval = 8;
#endif
		hprt = dwc2_readl(hsotg->regs + HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->host_interval *= 8;
			qh->next_active_frame |= 0x7;
			qh->start_split_frame = qh->next_active_frame;
			dwc2_sch_dbg(hsotg,
				     "QH=%p init*8 nxt=%04x, fn=%04x, int=%#x\n",
				     qh, qh->next_active_frame,
				     hsotg->frame_number, qh->host_interval);
		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->host_interval);
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->host_us);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->host_interval);
	}
}
/**
 * dwc2_hcd_qh_create() - Allocates and initializes a QH
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @urb:       Holds the information about the device/endpoint needed
 *             to initialize the QH
 * @mem_flags: Flags for allocating memory
 *
 * Return: Pointer to the newly allocated QH, or NULL on error
 */
struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
				   struct dwc2_hcd_urb *urb,
				   gfp_t mem_flags)
{
	struct dwc2_qh *qh;

	if (!urb->priv)
		return NULL;

	/* Allocate memory */
	qh = kzalloc(sizeof(*qh), mem_flags);
	if (!qh)
		return NULL;

	dwc2_qh_init(hsotg, qh, urb);

	if (hsotg->core_params->dma_desc_enable > 0 &&
	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
		dwc2_hcd_qh_free(hsotg, qh);
		return NULL;
	}

	return qh;
}
/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh:    The QH to free
 *
 * QH should already be removed from the list. QTD list should already be empty
 * if called from URB Dequeue.
 *
 * Must NOT be called with interrupts disabled or spinlock held
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Make sure any unreserve work is finished. */
	if (del_timer_sync(&qh->unreserve_timer)) {
		unsigned long flags;

		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_do_unreserve(hsotg, qh);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	if (qh->desc_list)
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	kfree(qh);
}
/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;
	u32 intr_mask;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!list_empty(&qh->qh_list_entry))
		/* QH already in a schedule */
		return 0;

	if (!dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number) &&
	    !hsotg->frame_number) {
		u16 new_frame;

		dev_dbg(hsotg->dev,
			"reset frame number counter\n");
		new_frame = dwc2_frame_num_inc(hsotg->frame_number,
					       SCHEDULE_SLOP);

		dwc2_sch_vdbg(hsotg, "QH=%p reset nxt=%04x=>%04x\n",
			      qh, qh->next_active_frame, new_frame);
		qh->next_active_frame = new_frame;
	}

	/* Add the new QH to the appropriate schedule */
	if (dwc2_qh_is_non_per(qh)) {
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->non_periodic_sched_inactive);
		return 0;
	}

	status = dwc2_schedule_periodic(hsotg, qh);
	if (status)
		return status;
	if (!hsotg->periodic_qh_count) {
		intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
		intr_mask |= GINTSTS_SOF;
		dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
	}
	hsotg->periodic_qh_count++;

	return 0;
}
/**
 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
 * schedule. Memory is not freed.
 *
 * @hsotg: The HCD state structure
 * @qh:    QH to remove from schedule
 */
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u32 intr_mask;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (list_empty(&qh->qh_list_entry))
		/* QH is not in a schedule */
		return;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
		list_del_init(&qh->qh_list_entry);
		return;
	}

	dwc2_deschedule_periodic(hsotg, qh);
	hsotg->periodic_qh_count--;
	if (!hsotg->periodic_qh_count) {
		intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
		intr_mask &= ~GINTSTS_SOF;
		dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
	}
}
/*
 * Schedule the next continuing periodic split transfer
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh, u16 frame_number,
				      int sched_next_periodic_split)
{
	u16 incr;
	u16 old_frame = qh->next_active_frame;

	if (sched_next_periodic_split) {
		qh->next_active_frame = frame_number;
		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
		if (dwc2_frame_num_le(frame_number, incr)) {
			/*
			 * Allow one frame to elapse after start split
			 * microframe before scheduling complete split, but
			 * DON'T if we are doing the next start split in the
			 * same frame for an ISOC out
			 */
			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
			    qh->ep_is_in != 0) {
				qh->next_active_frame = dwc2_frame_num_inc(
					qh->next_active_frame, 1);
			}
		}
	} else {
		qh->next_active_frame =
			dwc2_frame_num_inc(qh->start_split_frame,
					   qh->host_interval);
		if (dwc2_frame_num_le(qh->next_active_frame, frame_number))
			qh->next_active_frame = frame_number;
		qh->next_active_frame |= 0x7;
		qh->start_split_frame = qh->next_active_frame;
	}

	dwc2_sch_vdbg(hsotg, "QH=%p next(%d) fn=%04x, nxt=%04x=>%04x (%+d)\n",
		      qh, sched_next_periodic_split, frame_number, old_frame,
		      qh->next_active_frame,
		      dwc2_frame_num_dec(qh->next_active_frame, old_frame));
}
/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	u16 frame_number;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
		return;
	}

	frame_number = dwc2_hcd_get_frame_number(hsotg);

	if (qh->do_split) {
		dwc2_sched_periodic_split(hsotg, qh, frame_number,
					  sched_next_periodic_split);
	} else {
		qh->next_active_frame = dwc2_frame_num_inc(
			qh->next_active_frame, qh->host_interval);
		if (dwc2_frame_num_le(qh->next_active_frame, frame_number))
			qh->next_active_frame = frame_number;
	}

	if (list_empty(&qh->qtd_list)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		return;
	}

	/*
	 * Remove from periodic_sched_queued and move to
	 * appropriate queue
	 */
	if ((hsotg->core_params->uframe_sched > 0 &&
	     dwc2_frame_num_le(qh->next_active_frame, frame_number)) ||
	    (hsotg->core_params->uframe_sched <= 0 &&
	     qh->next_active_frame == frame_number))
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_ready);
	else
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_inactive);
}
/**
 * dwc2_hcd_qtd_init() - Initializes a QTD structure
 *
 * @qtd: The QTD to initialize
 * @urb: The associated URB
 */
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
	qtd->urb = urb;
	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
			USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * The only time the QTD data toggle is used is on the data
		 * phase of control transfers. This phase always starts with
		 * DATA1.
		 */
		qtd->data_toggle = DWC2_HC_PID_DATA1;
		qtd->control_phase = DWC2_CONTROL_SETUP;
	}

	/* Start split */
	qtd->complete_split = 0;
	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
	qtd->isoc_split_offset = 0;
	qtd->in_process = 0;

	/* Store the qtd ptr in the urb to reference the QTD */
	urb->qtd = qtd;
}
/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 *			Caller must hold driver lock.
 *
 * @hsotg: The DWC HCD structure
 * @qtd:   The QTD to add
 * @qh:    Queue head to add qtd to
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * If the QH to which the QTD is added is not currently scheduled, it is placed
 * into the proper schedule based on its EP type.
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh *qh)
{
	int retval;

	if (unlikely(!qh)) {
		dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
		retval = -EINVAL;
		goto fail;
	}

	retval = dwc2_hcd_qh_add(hsotg, qh);
	if (retval)
		goto fail;

	qtd->qh = qh;
	list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);

	return 0;
fail:
	return retval;
}