|
@@ -279,23 +279,76 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
|
|
|
readl(&xhci->dba->doorbell[0]);
|
|
|
}
|
|
|
|
|
|
-static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
|
|
|
+static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
|
|
|
+{
|
|
|
+ return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
|
|
|
+}
|
|
|
+
|
|
|
+static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
|
|
|
+{
|
|
|
+ return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
|
|
|
+ cmd_list);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Turn all commands on command ring with status set to "aborted" to no-op trbs.
|
|
|
+ * If there are other commands waiting then restart the ring and kick the timer.
|
|
|
+ * This must be called with command ring stopped and xhci->lock held.
|
|
|
+ */
|
|
|
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
|
|
|
+ struct xhci_command *cur_cmd)
|
|
|
+{
|
|
|
+ struct xhci_command *i_cmd;
|
|
|
+ u32 cycle_state;
|
|
|
+
|
|
|
+ /* Turn all aborted commands in list to no-ops, then restart */
|
|
|
+ list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
|
|
|
+
|
|
|
+ if (i_cmd->status != COMP_CMD_ABORT)
|
|
|
+ continue;
|
|
|
+
|
|
|
+ i_cmd->status = COMP_CMD_STOP;
|
|
|
+
|
|
|
+ xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
|
|
|
+ i_cmd->command_trb);
|
|
|
+ /* get cycle state from the original cmd trb */
|
|
|
+ cycle_state = le32_to_cpu(
|
|
|
+ i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
|
|
|
+ /* modify the command trb to no-op command */
|
|
|
+ i_cmd->command_trb->generic.field[0] = 0;
|
|
|
+ i_cmd->command_trb->generic.field[1] = 0;
|
|
|
+ i_cmd->command_trb->generic.field[2] = 0;
|
|
|
+ i_cmd->command_trb->generic.field[3] = cpu_to_le32(
|
|
|
+ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * caller waiting for completion is called when command
|
|
|
+ * completion event is received for these no-op commands
|
|
|
+ */
|
|
|
+ }
|
|
|
+
|
|
|
+ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
|
|
|
+
|
|
|
+ /* ring command ring doorbell to restart the command ring */
|
|
|
+ if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
|
|
|
+ !(xhci->xhc_state & XHCI_STATE_DYING)) {
|
|
|
+ xhci->current_cmd = cur_cmd;
|
|
|
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
|
|
|
+ xhci_ring_cmd_db(xhci);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+/* Must be called with xhci->lock held, releases and acquires lock back */
|
|
|
+static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
|
|
|
{
|
|
|
u64 temp_64;
|
|
|
int ret;
|
|
|
|
|
|
xhci_dbg(xhci, "Abort command ring\n");
|
|
|
|
|
|
- temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
|
|
|
- xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
|
|
|
+ reinit_completion(&xhci->cmd_ring_stop_completion);
|
|
|
|
|
|
- /*
|
|
|
- * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
|
|
|
- * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
|
|
|
- * but the completion event in never sent. Use the cmd timeout timer to
|
|
|
- * handle those cases. Use twice the time to cover the bit polling retry
|
|
|
- */
|
|
|
- mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
|
|
|
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
|
|
|
xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
|
|
|
&xhci->op_regs->cmd_ring);
|
|
|
|
|
@@ -315,17 +368,30 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
|
|
|
udelay(1000);
|
|
|
ret = xhci_handshake(&xhci->op_regs->cmd_ring,
|
|
|
CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
|
|
|
- if (ret == 0)
|
|
|
- return 0;
|
|
|
-
|
|
|
- xhci_err(xhci, "Stopped the command ring failed, "
|
|
|
- "maybe the host is dead\n");
|
|
|
- del_timer(&xhci->cmd_timer);
|
|
|
- xhci->xhc_state |= XHCI_STATE_DYING;
|
|
|
- xhci_halt(xhci);
|
|
|
- return -ESHUTDOWN;
|
|
|
+ if (ret < 0) {
|
|
|
+ xhci_err(xhci, "Stopped the command ring failed, "
|
|
|
+ "maybe the host is dead\n");
|
|
|
+ xhci->xhc_state |= XHCI_STATE_DYING;
|
|
|
+ xhci_halt(xhci);
|
|
|
+ return -ESHUTDOWN;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ /*
|
|
|
+ * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
|
|
|
+ * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
|
|
|
+ * but the completion event is never sent. Wait 2 secs (arbitrary
|
|
|
+ * number) to handle those cases after negation of CMD_RING_RUNNING.
|
|
|
+ */
|
|
|
+ spin_unlock_irqrestore(&xhci->lock, flags);
|
|
|
+ ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
|
|
|
+ msecs_to_jiffies(2000));
|
|
|
+ spin_lock_irqsave(&xhci->lock, flags);
|
|
|
+ if (!ret) {
|
|
|
+ xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
|
|
|
+ xhci_cleanup_command_queue(xhci);
|
|
|
+ } else {
|
|
|
+ xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
|
|
|
}
|
|
|
-
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -1207,101 +1273,62 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
|
|
|
xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * Turn all commands on command ring with status set to "aborted" to no-op trbs.
|
|
|
- * If there are other commands waiting then restart the ring and kick the timer.
|
|
|
- * This must be called with command ring stopped and xhci->lock held.
|
|
|
- */
|
|
|
-static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
|
|
|
- struct xhci_command *cur_cmd)
|
|
|
-{
|
|
|
- struct xhci_command *i_cmd, *tmp_cmd;
|
|
|
- u32 cycle_state;
|
|
|
-
|
|
|
- /* Turn all aborted commands in list to no-ops, then restart */
|
|
|
- list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
|
|
|
- cmd_list) {
|
|
|
-
|
|
|
- if (i_cmd->status != COMP_CMD_ABORT)
|
|
|
- continue;
|
|
|
-
|
|
|
- i_cmd->status = COMP_CMD_STOP;
|
|
|
-
|
|
|
- xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
|
|
|
- i_cmd->command_trb);
|
|
|
- /* get cycle state from the original cmd trb */
|
|
|
- cycle_state = le32_to_cpu(
|
|
|
- i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
|
|
|
- /* modify the command trb to no-op command */
|
|
|
- i_cmd->command_trb->generic.field[0] = 0;
|
|
|
- i_cmd->command_trb->generic.field[1] = 0;
|
|
|
- i_cmd->command_trb->generic.field[2] = 0;
|
|
|
- i_cmd->command_trb->generic.field[3] = cpu_to_le32(
|
|
|
- TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
|
|
|
-
|
|
|
- /*
|
|
|
- * caller waiting for completion is called when command
|
|
|
- * completion event is received for these no-op commands
|
|
|
- */
|
|
|
- }
|
|
|
-
|
|
|
- xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
|
|
|
-
|
|
|
- /* ring command ring doorbell to restart the command ring */
|
|
|
- if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
|
|
|
- !(xhci->xhc_state & XHCI_STATE_DYING)) {
|
|
|
- xhci->current_cmd = cur_cmd;
|
|
|
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
|
|
|
- xhci_ring_cmd_db(xhci);
|
|
|
- }
|
|
|
- return;
|
|
|
-}
|
|
|
-
|
|
|
-
|
|
|
-void xhci_handle_command_timeout(unsigned long data)
|
|
|
+void xhci_handle_command_timeout(struct work_struct *work)
|
|
|
{
|
|
|
struct xhci_hcd *xhci;
|
|
|
int ret;
|
|
|
unsigned long flags;
|
|
|
u64 hw_ring_state;
|
|
|
- bool second_timeout = false;
|
|
|
- xhci = (struct xhci_hcd *) data;
|
|
|
|
|
|
- /* mark this command to be cancelled */
|
|
|
+ xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
|
|
|
+
|
|
|
spin_lock_irqsave(&xhci->lock, flags);
|
|
|
- if (xhci->current_cmd) {
|
|
|
- if (xhci->current_cmd->status == COMP_CMD_ABORT)
|
|
|
- second_timeout = true;
|
|
|
- xhci->current_cmd->status = COMP_CMD_ABORT;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If timeout work is pending, or current_cmd is NULL, it means we
|
|
|
+ * raced with command completion. Command is handled so just return.
|
|
|
+ */
|
|
|
+ if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
|
|
|
+ spin_unlock_irqrestore(&xhci->lock, flags);
|
|
|
+ return;
|
|
|
}
|
|
|
+ /* mark this command to be cancelled */
|
|
|
+ xhci->current_cmd->status = COMP_CMD_ABORT;
|
|
|
|
|
|
/* Make sure command ring is running before aborting it */
|
|
|
hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
|
|
|
if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
|
|
|
(hw_ring_state & CMD_RING_RUNNING)) {
|
|
|
- spin_unlock_irqrestore(&xhci->lock, flags);
|
|
|
+ /* Prevent new doorbell, and start command abort */
|
|
|
+ xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
|
|
|
xhci_dbg(xhci, "Command timeout\n");
|
|
|
- ret = xhci_abort_cmd_ring(xhci);
|
|
|
+ ret = xhci_abort_cmd_ring(xhci, flags);
|
|
|
if (unlikely(ret == -ESHUTDOWN)) {
|
|
|
xhci_err(xhci, "Abort command ring failed\n");
|
|
|
xhci_cleanup_command_queue(xhci);
|
|
|
+ spin_unlock_irqrestore(&xhci->lock, flags);
|
|
|
usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
|
|
|
xhci_dbg(xhci, "xHCI host controller is dead.\n");
|
|
|
+
|
|
|
+ return;
|
|
|
}
|
|
|
- return;
|
|
|
+
|
|
|
+ goto time_out_completed;
|
|
|
}
|
|
|
|
|
|
- /* command ring failed to restart, or host removed. Bail out */
|
|
|
- if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
|
|
|
- spin_unlock_irqrestore(&xhci->lock, flags);
|
|
|
- xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
|
|
|
+ /* host removed. Bail out */
|
|
|
+ if (xhci->xhc_state & XHCI_STATE_REMOVING) {
|
|
|
+ xhci_dbg(xhci, "host removed, ring start fail?\n");
|
|
|
xhci_cleanup_command_queue(xhci);
|
|
|
- return;
|
|
|
+
|
|
|
+ goto time_out_completed;
|
|
|
}
|
|
|
|
|
|
/* command timeout on stopped ring, ring can't be aborted */
|
|
|
xhci_dbg(xhci, "Command timeout on stopped ring\n");
|
|
|
xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
|
|
|
+
|
|
|
+time_out_completed:
|
|
|
spin_unlock_irqrestore(&xhci->lock, flags);
|
|
|
return;
|
|
|
}
|
|
@@ -1333,7 +1360,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
|
|
|
|
|
|
cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
|
|
|
|
|
|
- del_timer(&xhci->cmd_timer);
|
|
|
+ cancel_delayed_work(&xhci->cmd_timer);
|
|
|
|
|
|
trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
|
|
|
|
|
@@ -1341,7 +1368,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
|
|
|
|
|
|
/* If CMD ring stopped we own the trbs between enqueue and dequeue */
|
|
|
if (cmd_comp_code == COMP_CMD_STOP) {
|
|
|
- xhci_handle_stopped_cmd_ring(xhci, cmd);
|
|
|
+ complete_all(&xhci->cmd_ring_stop_completion);
|
|
|
return;
|
|
|
}
|
|
|
|
|
@@ -1359,8 +1386,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
|
|
|
*/
|
|
|
if (cmd_comp_code == COMP_CMD_ABORT) {
|
|
|
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
|
|
|
- if (cmd->status == COMP_CMD_ABORT)
|
|
|
+ if (cmd->status == COMP_CMD_ABORT) {
|
|
|
+ if (xhci->current_cmd == cmd)
|
|
|
+ xhci->current_cmd = NULL;
|
|
|
goto event_handled;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
|
|
@@ -1421,7 +1451,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
|
|
|
if (cmd->cmd_list.next != &xhci->cmd_list) {
|
|
|
xhci->current_cmd = list_entry(cmd->cmd_list.next,
|
|
|
struct xhci_command, cmd_list);
|
|
|
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
|
|
|
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
|
|
|
+ } else if (xhci->current_cmd == cmd) {
|
|
|
+ xhci->current_cmd = NULL;
|
|
|
}
|
|
|
|
|
|
event_handled:
|
|
@@ -1939,8 +1971,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
|
struct xhci_ep_ctx *ep_ctx;
|
|
|
u32 trb_comp_code;
|
|
|
u32 remaining, requested;
|
|
|
- bool on_data_stage;
|
|
|
+ u32 trb_type;
|
|
|
|
|
|
+ trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
|
|
|
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
|
|
|
xdev = xhci->devs[slot_id];
|
|
|
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
|
|
@@ -1950,14 +1983,11 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
|
requested = td->urb->transfer_buffer_length;
|
|
|
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
|
|
|
|
|
|
- /* not setup (dequeue), or status stage means we are at data stage */
|
|
|
- on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb);
|
|
|
-
|
|
|
switch (trb_comp_code) {
|
|
|
case COMP_SUCCESS:
|
|
|
- if (ep_trb != td->last_trb) {
|
|
|
+ if (trb_type != TRB_STATUS) {
|
|
|
xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
|
|
|
- on_data_stage ? "data" : "setup");
|
|
|
+ (trb_type == TRB_DATA) ? "data" : "setup");
|
|
|
*status = -ESHUTDOWN;
|
|
|
break;
|
|
|
}
|
|
@@ -1967,15 +1997,25 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
|
*status = 0;
|
|
|
break;
|
|
|
case COMP_STOP_SHORT:
|
|
|
- if (on_data_stage)
|
|
|
+ if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
|
|
|
td->urb->actual_length = remaining;
|
|
|
else
|
|
|
xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
|
|
|
goto finish_td;
|
|
|
case COMP_STOP:
|
|
|
- if (on_data_stage)
|
|
|
+ switch (trb_type) {
|
|
|
+ case TRB_SETUP:
|
|
|
+ td->urb->actual_length = 0;
|
|
|
+ goto finish_td;
|
|
|
+ case TRB_DATA:
|
|
|
+ case TRB_NORMAL:
|
|
|
td->urb->actual_length = requested - remaining;
|
|
|
- goto finish_td;
|
|
|
+ goto finish_td;
|
|
|
+ default:
|
|
|
+ xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
|
|
|
+ trb_type);
|
|
|
+ goto finish_td;
|
|
|
+ }
|
|
|
case COMP_STOP_INVAL:
|
|
|
goto finish_td;
|
|
|
default:
|
|
@@ -1987,7 +2027,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
|
/* else fall through */
|
|
|
case COMP_STALL:
|
|
|
/* Did we transfer part of the data (middle) phase? */
|
|
|
- if (on_data_stage)
|
|
|
+ if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
|
|
|
td->urb->actual_length = requested - remaining;
|
|
|
else if (!td->urb_length_set)
|
|
|
td->urb->actual_length = 0;
|
|
@@ -1995,14 +2035,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
|
}
|
|
|
|
|
|
/* stopped at setup stage, no data transferred */
|
|
|
- if (ep_trb == ep_ring->dequeue)
|
|
|
+ if (trb_type == TRB_SETUP)
|
|
|
goto finish_td;
|
|
|
|
|
|
/*
|
|
|
* if on data stage then update the actual_length of the URB and flag it
|
|
|
* as set, so it won't be overwritten in the event for the last TRB.
|
|
|
*/
|
|
|
- if (on_data_stage) {
|
|
|
+ if (trb_type == TRB_DATA ||
|
|
|
+ trb_type == TRB_NORMAL) {
|
|
|
td->urb_length_set = true;
|
|
|
td->urb->actual_length = requested - remaining;
|
|
|
xhci_dbg(xhci, "Waiting for status stage event\n");
|
|
@@ -3790,9 +3831,9 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
|
|
|
|
|
|
/* if there are no other commands queued we start the timeout timer */
|
|
|
if (xhci->cmd_list.next == &cmd->cmd_list &&
|
|
|
- !timer_pending(&xhci->cmd_timer)) {
|
|
|
+ !delayed_work_pending(&xhci->cmd_timer)) {
|
|
|
xhci->current_cmd = cmd;
|
|
|
- mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
|
|
|
+ xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
|
|
|
}
|
|
|
|
|
|
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
|