@@ -398,14 +398,21 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 	spin_lock_irqsave(&xhci->lock, flags);
 	for (i = LAST_EP_INDEX; i > 0; i--) {
 		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
+			struct xhci_ep_ctx *ep_ctx;
 			struct xhci_command *command;
+
+			ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i);
+
+			/* Check ep is running, required by AMD SNPS 3.1 xHC */
+			if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING)
+				continue;
+
 			command = xhci_alloc_command(xhci, false, false,
 						     GFP_NOWAIT);
 			if (!command) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
 				xhci_free_command(xhci, cmd);
 				return -ENOMEM;
-
 			}
 			xhci_queue_stop_endpoint(xhci, command, slot_id, i,
 						 suspend);
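
For reviewers unfamiliar with the endpoint context layout, the sketch below shows the added state check in isolation. It is a standalone mock, not driver code: the EP_STATE_* values and the 3-bit endpoint state field in the first dword of the endpoint context follow the xHCI specification (section 6.2.3), while mock_ep_ctx, its host-endian ep_info field, and this GET_EP_CTX_STATE() stand-in are simplified substitutes for the __le32-based definitions in drivers/usb/host/xhci.h.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Simplified stand-ins for the xhci.h endpoint context
	 * definitions. Per xHCI spec 6.2.3, bits 2:0 of the first
	 * dword of the endpoint context hold the endpoint state.
	 */
	#define EP_STATE_MASK		0x7
	#define EP_STATE_DISABLED	0
	#define EP_STATE_RUNNING	1
	#define EP_STATE_HALTED		2
	#define EP_STATE_STOPPED	3
	#define EP_STATE_ERROR		4

	struct mock_ep_ctx {
		uint32_t ep_info;	/* host-endian in this sketch */
	};

	#define GET_EP_CTX_STATE(ctx)	((ctx)->ep_info & EP_STATE_MASK)

	int main(void)
	{
		struct mock_ep_ctx stopped = { .ep_info = EP_STATE_STOPPED };
		struct mock_ep_ctx running = { .ep_info = EP_STATE_RUNNING };

		/*
		 * Mirrors the patch: skip the endpoint unless the
		 * controller reports it as running.
		 */
		if (GET_EP_CTX_STATE(&stopped) != EP_STATE_RUNNING)
			printf("stopped ep: skipped, no stop command queued\n");
		if (GET_EP_CTX_STATE(&running) == EP_STATE_RUNNING)
			printf("running ep: stop endpoint command queued\n");

		return 0;
	}

With the check in place, stop endpoint commands are queued only for endpoints the controller actually reports as running, matching the requirement the comment notes for the AMD SNPS 3.1 host.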