|
@@ -58,6 +58,9 @@
|
|
|
/* Wait this long before releasing periodic reservation */
|
|
|
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
|
|
|
|
|
|
+/* If we get a NAK, wait this long before retrying */
|
|
|
+#define DWC2_RETRY_WAIT_DELAY (msecs_to_jiffies(1))
|
|
|
+
|
|
|
/**
|
|
|
* dwc2_periodic_channel_available() - Checks that a channel is available for a
|
|
|
* periodic transfer
|
|
@@ -1440,6 +1443,55 @@ static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
|
|
|
list_del_init(&qh->qh_list_entry);
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * dwc2_wait_timer_fn() - Timer function to re-queue after waiting
|
|
|
+ *
|
|
|
+ * As per the spec, a NAK indicates that "a function is temporarily unable to
|
|
|
+ * transmit or receive data, but will eventually be able to do so without need
|
|
|
+ * of host intervention".
|
|
|
+ *
|
|
|
+ * That means that when we encounter a NAK we're supposed to retry.
|
|
|
+ *
|
|
|
+ * ...but if we retry right away (from the interrupt handler that saw the NAK)
|
|
|
+ * then we can end up with an interrupt storm (if the other side keeps NAKing
|
|
|
+ * us) because on slow enough CPUs it could take us longer to get out of the
|
|
|
+ * interrupt routine than it takes for the device to send another NAK. That
|
|
|
+ * leads to a constant stream of NAK interrupts and the CPU locks up.
|
|
|
+ *
|
|
|
+ * ...so instead of retrying right away in the case of a NAK we'll set a timer
|
|
|
+ * to retry some time later. This function handles that timer and moves the
|
|
|
+ * qh back to the "inactive" list, then queues transactions.
|
|
|
+ *
|
|
|
+ * @t: Pointer to wait_timer in a qh.
|
|
|
+ */
|
|
|
+static void dwc2_wait_timer_fn(struct timer_list *t)
|
|
|
+{
|
|
|
+ struct dwc2_qh *qh = from_timer(qh, t, wait_timer);
|
|
|
+ struct dwc2_hsotg *hsotg = qh->hsotg;
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&hsotg->lock, flags);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * We'll set wait_timer_cancel to true if we want to cancel this
|
|
|
+ * operation in dwc2_hcd_qh_unlink().
|
|
|
+ */
|
|
|
+ if (!qh->wait_timer_cancel) {
|
|
|
+ enum dwc2_transaction_type tr_type;
|
|
|
+
|
|
|
+ qh->want_wait = false;
|
|
|
+
|
|
|
+ list_move(&qh->qh_list_entry,
|
|
|
+ &hsotg->non_periodic_sched_inactive);
|
|
|
+
|
|
|
+ tr_type = dwc2_hcd_select_transactions(hsotg);
|
|
|
+ if (tr_type != DWC2_TRANSACTION_NONE)
|
|
|
+ dwc2_hcd_queue_transactions(hsotg, tr_type);
|
|
|
+ }
|
|
|
+
|
|
|
+ spin_unlock_irqrestore(&hsotg->lock, flags);
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* dwc2_qh_init() - Initializes a QH structure
|
|
|
*
|
|
@@ -1468,6 +1520,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
|
|
|
/* Initialize QH */
|
|
|
qh->hsotg = hsotg;
|
|
|
timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
|
|
|
+ timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0);
|
|
|
qh->ep_type = ep_type;
|
|
|
qh->ep_is_in = ep_is_in;
|
|
|
|
|
@@ -1628,6 +1681,16 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
|
|
|
dwc2_do_unreserve(hsotg, qh);
|
|
|
spin_unlock_irqrestore(&hsotg->lock, flags);
|
|
|
}
|
|
|
+
|
|
|
+ /*
|
|
|
+ * We don't have the lock so we can safely wait until the wait timer
|
|
|
+ * finishes. Of course, at this point in time we'd better have set
|
|
|
+ * wait_timer_cancel to true so if this timer was still pending it
|
|
|
+ * won't do anything anyway, but we want it to finish before we free
|
|
|
+ * memory.
|
|
|
+ */
|
|
|
+ del_timer_sync(&qh->wait_timer);
|
|
|
+
|
|
|
dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
|
|
|
|
|
|
if (qh->desc_list)
|
|
@@ -1663,9 +1726,16 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
|
|
|
qh->start_active_frame = hsotg->frame_number;
|
|
|
qh->next_active_frame = qh->start_active_frame;
|
|
|
|
|
|
- /* Always start in inactive schedule */
|
|
|
- list_add_tail(&qh->qh_list_entry,
|
|
|
- &hsotg->non_periodic_sched_inactive);
|
|
|
+ if (qh->want_wait) {
|
|
|
+ list_add_tail(&qh->qh_list_entry,
|
|
|
+ &hsotg->non_periodic_sched_waiting);
|
|
|
+ qh->wait_timer_cancel = false;
|
|
|
+ mod_timer(&qh->wait_timer,
|
|
|
+ jiffies + DWC2_RETRY_WAIT_DELAY + 1);
|
|
|
+ } else {
|
|
|
+ list_add_tail(&qh->qh_list_entry,
|
|
|
+ &hsotg->non_periodic_sched_inactive);
|
|
|
+ }
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -1695,6 +1765,9 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
|
|
|
|
|
|
dev_vdbg(hsotg->dev, "%s()\n", __func__);
|
|
|
|
|
|
+ /* If the wait_timer is pending, this will stop it from acting */
|
|
|
+ qh->wait_timer_cancel = true;
|
|
|
+
|
|
|
if (list_empty(&qh->qh_list_entry))
|
|
|
/* QH is not in a schedule */
|
|
|
return;
|
|
@@ -1903,7 +1976,7 @@ void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
|
|
|
if (dwc2_qh_is_non_per(qh)) {
|
|
|
dwc2_hcd_qh_unlink(hsotg, qh);
|
|
|
if (!list_empty(&qh->qtd_list))
|
|
|
- /* Add back to inactive non-periodic schedule */
|
|
|
+ /* Add back to inactive/waiting non-periodic schedule */
|
|
|
dwc2_hcd_qh_add(hsotg, qh);
|
|
|
return;
|
|
|
}
|