@@ -301,28 +301,6 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 return 1;
 }

-void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
-{
- u64 temp;
- dma_addr_t deq;
-
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
- if (deq == 0 && !in_interrupt())
- xhci_warn(xhci, "WARN something wrong with SW event ring "
- "dequeue ptr.\n");
- /* Update HC event ring dequeue pointer */
- temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- temp &= ERST_PTR_MASK;
- /* Don't clear the EHB bit (which is RW1C) because
- * there might be more events to service.
- */
- temp &= ~ERST_EHB;
- xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
- xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
- &xhci->ir_set->erst_dequeue);
-}
-
 /* Ring the host controller doorbell after placing a command on the ring */
 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
@@ -359,11 +337,6 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 field = xhci_readl(xhci, db_addr) & DB_MASK;
 field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
 xhci_writel(xhci, field, db_addr);
- /* Flush PCI posted writes - FIXME Matthew Wilcox says this
- * isn't time-critical and we shouldn't make the CPU wait for
- * the flush.
- */
- xhci_readl(xhci, db_addr);
 }
 }

@@ -419,6 +392,50 @@ static struct xhci_segment *find_trb_seg(
 return cur_seg;
 }

+
+static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct xhci_virt_ep *ep;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ /* Common case: no streams */
+ if (!(ep->ep_state & EP_HAS_STREAMS))
+ return ep->ring;
+
+ if (stream_id == 0) {
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has streams, "
+ "but URB has no stream ID.\n",
+ slot_id, ep_index);
+ return NULL;
+ }
+
+ if (stream_id < ep->stream_info->num_streams)
+ return ep->stream_info->stream_rings[stream_id];
+
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has "
+ "stream IDs 1 to %u allocated, "
+ "but stream ID %u is requested.\n",
+ slot_id, ep_index,
+ ep->stream_info->num_streams - 1,
+ stream_id);
+ return NULL;
+}
+
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb)
+{
+ return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
+ xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
+}
+
 /*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
@@ -578,16 +595,24 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 struct xhci_td *cur_td, int status, char *adjective)
 {
 struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct urb *urb;
+ struct urb_priv *urb_priv;

- cur_td->urb->hcpriv = NULL;
- usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
- xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+ urb = cur_td->urb;
+ urb_priv = urb->hcpriv;
+ urb_priv->td_cnt++;
- spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(hcd, cur_td->urb, status);
- kfree(cur_td);
- spin_lock(&xhci->lock);
- xhci_dbg(xhci, "%s URB given back\n", adjective);

+ /* Only giveback urb when this is the last td in urb */
+ if (urb_priv->td_cnt == urb_priv->length) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
+
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ xhci_urb_free_priv(xhci, urb_priv);
+ spin_lock(&xhci->lock);
+ xhci_dbg(xhci, "%s URB given back\n", adjective);
+ }
 }

 /*
@@ -1132,7 +1157,6 @@ static void handle_port_status(struct xhci_hcd *xhci,

 /* Update event ring dequeue pointer before dropping the lock */
 inc_deq(xhci, xhci->event_ring, true);
- xhci_set_hc_event_deq(xhci);

 spin_unlock(&xhci->lock);
 /* Pass this up to the core */
@@ -1258,306 +1282,33 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
 }

 /*
- * If this function returns an error condition, it means it got a Transfer
- * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
- * At this point, the host controller is probably hosed and should be reset.
+ * Finish the td processing, remove the td from td list;
+ * Return 1 if the urb can be given back.
 */
-static int handle_tx_event(struct xhci_hcd *xhci,
- struct xhci_transfer_event *event)
+static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status, bool skip)
 {
 struct xhci_virt_device *xdev;
- struct xhci_virt_ep *ep;
 struct xhci_ring *ep_ring;
 unsigned int slot_id;
 int ep_index;
- struct xhci_td *td = NULL;
- dma_addr_t event_dma;
- struct xhci_segment *event_seg;
- union xhci_trb *event_trb;
 struct urb *urb = NULL;
- int status = -EINPROGRESS;
 struct xhci_ep_ctx *ep_ctx;
+ int ret = 0;
+ struct urb_priv *urb_priv;
 u32 trb_comp_code;

- xhci_dbg(xhci, "In %s\n", __func__);
 slot_id = TRB_TO_SLOT_ID(event->flags);
 xdev = xhci->devs[slot_id];
- if (!xdev) {
- xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
- return -ENODEV;
- }
-
- /* Endpoint ID is 1 based, our index is zero based */
 ep_index = TRB_TO_EP_ID(event->flags) - 1;
- xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
- ep = &xdev->eps[ep_index];
 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
- xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
- "or incorrect stream ring\n");
- return -ENODEV;
- }
-
- event_dma = event->buffer;
|
|
|
|
- /* This TRB should be in the TD at the head of this ring's TD list */
|
|
|
|
- xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
|
|
|
|
- if (list_empty(&ep_ring->td_list)) {
|
|
|
|
- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
|
|
|
|
- TRB_TO_SLOT_ID(event->flags), ep_index);
|
|
|
|
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
|
|
|
|
- (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
|
|
|
|
- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
|
|
|
|
- urb = NULL;
|
|
|
|
- goto cleanup;
|
|
|
|
- }
|
|
|
|
- xhci_dbg(xhci, "%s - getting list entry\n", __func__);
|
|
|
|
- td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
|
|
|
|
-
|
|
|
|
- /* Is this a TRB in the currently executing TD? */
|
|
|
|
- xhci_dbg(xhci, "%s - looking for TD\n", __func__);
|
|
|
|
- event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
|
|
|
|
- td->last_trb, event_dma);
|
|
|
|
- xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
|
|
|
|
- if (!event_seg) {
|
|
|
|
- /* HC is busted, give up! */
|
|
|
|
- xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
|
|
|
|
- return -ESHUTDOWN;
|
|
|
|
- }
|
|
|
|
- event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
|
|
|
|
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
|
|
|
|
- (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
|
|
|
|
- xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
|
|
|
|
- lower_32_bits(event->buffer));
|
|
|
|
- xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
|
|
|
|
- upper_32_bits(event->buffer));
|
|
|
|
- xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
|
|
|
|
- (unsigned int) event->transfer_len);
|
|
|
|
- xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
|
|
|
|
- (unsigned int) event->flags);
|
|
|
|
-
|
|
|
|
- /* Look for common error cases */
|
|
|
|
trb_comp_code = GET_COMP_CODE(event->transfer_len);
|
|
trb_comp_code = GET_COMP_CODE(event->transfer_len);
|
|
- switch (trb_comp_code) {
|
|
|
|
- /* Skip codes that require special handling depending on
|
|
|
|
- * transfer type
|
|
|
|
- */
|
|
|
|
- case COMP_SUCCESS:
|
|
|
|
- case COMP_SHORT_TX:
|
|
|
|
- break;
|
|
|
|
- case COMP_STOP:
|
|
|
|
- xhci_dbg(xhci, "Stopped on Transfer TRB\n");
|
|
|
|
- break;
|
|
|
|
- case COMP_STOP_INVAL:
|
|
|
|
- xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
|
|
|
|
- break;
|
|
|
|
- case COMP_STALL:
|
|
|
|
- xhci_warn(xhci, "WARN: Stalled endpoint\n");
|
|
|
|
- ep->ep_state |= EP_HALTED;
|
|
|
|
- status = -EPIPE;
|
|
|
|
- break;
|
|
|
|
- case COMP_TRB_ERR:
|
|
|
|
- xhci_warn(xhci, "WARN: TRB error on endpoint\n");
|
|
|
|
- status = -EILSEQ;
|
|
|
|
- break;
|
|
|
|
- case COMP_SPLIT_ERR:
|
|
|
|
- case COMP_TX_ERR:
|
|
|
|
- xhci_warn(xhci, "WARN: transfer error on endpoint\n");
|
|
|
|
- status = -EPROTO;
|
|
|
|
- break;
|
|
|
|
- case COMP_BABBLE:
|
|
|
|
- xhci_warn(xhci, "WARN: babble error on endpoint\n");
|
|
|
|
- status = -EOVERFLOW;
|
|
|
|
- break;
|
|
|
|
- case COMP_DB_ERR:
|
|
|
|
- xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
|
|
|
|
- status = -ENOSR;
|
|
|
|
- break;
|
|
|
|
- default:
|
|
|
|
- if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
|
|
|
|
- status = 0;
|
|
|
|
- break;
|
|
|
|
- }
|
|
|
|
- xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
|
|
|
|
- urb = NULL;
|
|
|
|
- goto cleanup;
|
|
|
|
- }
|
|
|
|
- /* Now update the urb's actual_length and give back to the core */
|
|
|
|
- /* Was this a control transfer? */
|
|
|
|
- if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
|
|
|
|
- xhci_debug_trb(xhci, xhci->event_ring->dequeue);
|
|
|
|
- switch (trb_comp_code) {
|
|
|
|
- case COMP_SUCCESS:
|
|
|
|
- if (event_trb == ep_ring->dequeue) {
|
|
|
|
- xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
|
|
|
|
- status = -ESHUTDOWN;
|
|
|
|
- } else if (event_trb != td->last_trb) {
|
|
|
|
- xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
|
|
|
|
- status = -ESHUTDOWN;
|
|
|
|
- } else {
|
|
|
|
- xhci_dbg(xhci, "Successful control transfer!\n");
|
|
|
|
- status = 0;
|
|
|
|
- }
|
|
|
|
- break;
|
|
|
|
- case COMP_SHORT_TX:
|
|
|
|
- xhci_warn(xhci, "WARN: short transfer on control ep\n");
|
|
|
|
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
|
|
- status = -EREMOTEIO;
|
|
|
|
- else
|
|
|
|
- status = 0;
|
|
|
|
- break;
|
|
|
|
-
|
|
|
|
- default:
|
|
|
|
- if (!xhci_requires_manual_halt_cleanup(xhci,
|
|
|
|
- ep_ctx, trb_comp_code))
|
|
|
|
- break;
|
|
|
|
- xhci_dbg(xhci, "TRB error code %u, "
|
|
|
|
- "halted endpoint index = %u\n",
|
|
|
|
- trb_comp_code, ep_index);
|
|
|
|
- /* else fall through */
|
|
|
|
- case COMP_STALL:
|
|
|
|
- /* Did we transfer part of the data (middle) phase? */
|
|
|
|
- if (event_trb != ep_ring->dequeue &&
|
|
|
|
- event_trb != td->last_trb)
|
|
|
|
- td->urb->actual_length =
|
|
|
|
- td->urb->transfer_buffer_length
|
|
|
|
- - TRB_LEN(event->transfer_len);
|
|
|
|
- else
|
|
|
|
- td->urb->actual_length = 0;
|
|
|
|
|
|
|
|
- xhci_cleanup_halted_endpoint(xhci,
|
|
|
|
- slot_id, ep_index, 0, td, event_trb);
|
|
|
|
- goto td_cleanup;
|
|
|
|
- }
|
|
|
|
- /*
|
|
|
|
- * Did we transfer any data, despite the errors that might have
|
|
|
|
- * happened? I.e. did we get past the setup stage?
|
|
|
|
- */
|
|
|
|
- if (event_trb != ep_ring->dequeue) {
|
|
|
|
- /* The event was for the status stage */
|
|
|
|
- if (event_trb == td->last_trb) {
|
|
|
|
- if (td->urb->actual_length != 0) {
|
|
|
|
- /* Don't overwrite a previously set error code */
|
|
|
|
- if ((status == -EINPROGRESS ||
|
|
|
|
- status == 0) &&
|
|
|
|
- (td->urb->transfer_flags
|
|
|
|
- & URB_SHORT_NOT_OK))
|
|
|
|
- /* Did we already see a short data stage? */
|
|
|
|
- status = -EREMOTEIO;
|
|
|
|
- } else {
|
|
|
|
- td->urb->actual_length =
|
|
|
|
- td->urb->transfer_buffer_length;
|
|
|
|
- }
|
|
|
|
- } else {
|
|
|
|
- /* Maybe the event was for the data stage? */
|
|
|
|
- if (trb_comp_code != COMP_STOP_INVAL) {
|
|
|
|
- /* We didn't stop on a link TRB in the middle */
|
|
|
|
- td->urb->actual_length =
|
|
|
|
- td->urb->transfer_buffer_length -
|
|
|
|
- TRB_LEN(event->transfer_len);
|
|
|
|
- xhci_dbg(xhci, "Waiting for status stage event\n");
|
|
|
|
- urb = NULL;
|
|
|
|
- goto cleanup;
|
|
|
|
- }
|
|
|
|
- }
|
|
|
|
- }
|
|
|
|
- } else {
|
|
|
|
- switch (trb_comp_code) {
|
|
|
|
- case COMP_SUCCESS:
|
|
|
|
- /* Double check that the HW transferred everything. */
|
|
|
|
- if (event_trb != td->last_trb) {
|
|
|
|
- xhci_warn(xhci, "WARN Successful completion "
|
|
|
|
- "on short TX\n");
|
|
|
|
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
|
|
- status = -EREMOTEIO;
|
|
|
|
- else
|
|
|
|
- status = 0;
|
|
|
|
- } else {
|
|
|
|
- if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
|
|
|
|
- xhci_dbg(xhci, "Successful bulk "
|
|
|
|
- "transfer!\n");
|
|
|
|
- else
|
|
|
|
- xhci_dbg(xhci, "Successful interrupt "
|
|
|
|
- "transfer!\n");
|
|
|
|
- status = 0;
|
|
|
|
- }
|
|
|
|
- break;
|
|
|
|
- case COMP_SHORT_TX:
|
|
|
|
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
|
|
- status = -EREMOTEIO;
|
|
|
|
- else
|
|
|
|
- status = 0;
|
|
|
|
- break;
|
|
|
|
- default:
|
|
|
|
- /* Others already handled above */
|
|
|
|
- break;
|
|
|
|
- }
|
|
|
|
- dev_dbg(&td->urb->dev->dev,
|
|
|
|
- "ep %#x - asked for %d bytes, "
|
|
|
|
- "%d bytes untransferred\n",
|
|
|
|
- td->urb->ep->desc.bEndpointAddress,
|
|
|
|
- td->urb->transfer_buffer_length,
|
|
|
|
- TRB_LEN(event->transfer_len));
|
|
|
|
- /* Fast path - was this the last TRB in the TD for this URB? */
|
|
|
|
- if (event_trb == td->last_trb) {
|
|
|
|
- if (TRB_LEN(event->transfer_len) != 0) {
|
|
|
|
- td->urb->actual_length =
|
|
|
|
- td->urb->transfer_buffer_length -
|
|
|
|
- TRB_LEN(event->transfer_len);
|
|
|
|
- if (td->urb->transfer_buffer_length <
|
|
|
|
- td->urb->actual_length) {
|
|
|
|
- xhci_warn(xhci, "HC gave bad length "
|
|
|
|
- "of %d bytes left\n",
|
|
|
|
- TRB_LEN(event->transfer_len));
|
|
|
|
- td->urb->actual_length = 0;
|
|
|
|
- if (td->urb->transfer_flags &
|
|
|
|
- URB_SHORT_NOT_OK)
|
|
|
|
- status = -EREMOTEIO;
|
|
|
|
- else
|
|
|
|
- status = 0;
|
|
|
|
- }
|
|
|
|
- /* Don't overwrite a previously set error code */
|
|
|
|
- if (status == -EINPROGRESS) {
|
|
|
|
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
|
|
- status = -EREMOTEIO;
|
|
|
|
- else
|
|
|
|
- status = 0;
|
|
|
|
- }
|
|
|
|
- } else {
|
|
|
|
- td->urb->actual_length = td->urb->transfer_buffer_length;
|
|
|
|
- /* Ignore a short packet completion if the
|
|
|
|
- * untransferred length was zero.
|
|
|
|
- */
|
|
|
|
- if (status == -EREMOTEIO)
|
|
|
|
- status = 0;
|
|
|
|
- }
|
|
|
|
- } else {
|
|
|
|
- /* Slow path - walk the list, starting from the dequeue
|
|
|
|
- * pointer, to get the actual length transferred.
|
|
|
|
- */
|
|
|
|
- union xhci_trb *cur_trb;
|
|
|
|
- struct xhci_segment *cur_seg;
|
|
|
|
|
|
+ if (skip)
|
|
|
|
+ goto td_cleanup;
|
|
|
|
|
|
- td->urb->actual_length = 0;
|
|
|
|
- for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
|
|
|
|
- cur_trb != event_trb;
|
|
|
|
- next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
|
|
|
|
- if ((cur_trb->generic.field[3] &
|
|
|
|
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
|
|
|
|
- (cur_trb->generic.field[3] &
|
|
|
|
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
|
|
|
|
- td->urb->actual_length +=
|
|
|
|
- TRB_LEN(cur_trb->generic.field[2]);
|
|
|
|
- }
|
|
|
|
- /* If the ring didn't stop on a Link or No-op TRB, add
|
|
|
|
- * in the actual bytes transferred from the Normal TRB
|
|
|
|
- */
|
|
|
|
- if (trb_comp_code != COMP_STOP_INVAL)
|
|
|
|
- td->urb->actual_length +=
|
|
|
|
- TRB_LEN(cur_trb->generic.field[2]) -
|
|
|
|
- TRB_LEN(event->transfer_len);
|
|
|
|
- }
|
|
|
|
- }
|
|
|
|
if (trb_comp_code == COMP_STOP_INVAL ||
|
|
if (trb_comp_code == COMP_STOP_INVAL ||
|
|
trb_comp_code == COMP_STOP) {
|
|
trb_comp_code == COMP_STOP) {
|
|
/* The Endpoint Stop Command completion will take care of any
|
|
/* The Endpoint Stop Command completion will take care of any
|
|
@@ -1566,6 +1317,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
|
|
*/
|
|
*/
|
|
ep->stopped_td = td;
|
|
ep->stopped_td = td;
|
|
ep->stopped_trb = event_trb;
|
|
ep->stopped_trb = event_trb;
|
|
|
|
+ return 0;
|
|
} else {
|
|
} else {
|
|
if (trb_comp_code == COMP_STALL) {
|
|
if (trb_comp_code == COMP_STALL) {
|
|
/* The transfer is completed from the driver's
|
|
/* The transfer is completed from the driver's
|
|
@@ -1586,7 +1338,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
|
|
* xHCI hardware manually.
|
|
* xHCI hardware manually.
|
|
*/
|
|
*/
|
|
xhci_cleanup_halted_endpoint(xhci,
|
|
xhci_cleanup_halted_endpoint(xhci,
|
|
- slot_id, ep_index, ep_ring->stream_id, td, event_trb);
|
|
|
|
|
|
+ slot_id, ep_index, ep_ring->stream_id,
|
|
|
|
+ td, event_trb);
|
|
} else {
|
|
} else {
|
|
/* Update ring dequeue pointer */
|
|
/* Update ring dequeue pointer */
|
|
while (ep_ring->dequeue != td->last_trb)
|
|
while (ep_ring->dequeue != td->last_trb)
|
|
@@ -1597,6 +1350,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
|
|
td_cleanup:
|
|
td_cleanup:
|
|
/* Clean up the endpoint's TD list */
|
|
/* Clean up the endpoint's TD list */
|
|
urb = td->urb;
|
|
urb = td->urb;
|
|
|
|
+ urb_priv = urb->hcpriv;
|
|
|
|
+
|
|
/* Do one last check of the actual transfer length.
|
|
/* Do one last check of the actual transfer length.
|
|
* If the host controller said we transferred more data than
|
|
* If the host controller said we transferred more data than
|
|
* the buffer length, urb->actual_length will be a very big
|
|
* the buffer length, urb->actual_length will be a very big
|
|
@@ -1611,114 +1366,757 @@ td_cleanup:
|
|
urb->actual_length);
|
|
urb->actual_length);
|
|
urb->actual_length = 0;
|
|
urb->actual_length = 0;
|
|
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
- status = -EREMOTEIO;
|
|
|
|
|
|
+ *status = -EREMOTEIO;
|
|
else
|
|
else
|
|
- status = 0;
|
|
|
|
|
|
+ *status = 0;
|
|
}
|
|
}
|
|
list_del(&td->td_list);
|
|
list_del(&td->td_list);
|
|
/* Was this TD slated to be cancelled but completed anyway? */
|
|
/* Was this TD slated to be cancelled but completed anyway? */
|
|
if (!list_empty(&td->cancelled_td_list))
|
|
if (!list_empty(&td->cancelled_td_list))
|
|
list_del(&td->cancelled_td_list);
|
|
list_del(&td->cancelled_td_list);
|
|
|
|
|
|
- /* Leave the TD around for the reset endpoint function to use
|
|
|
|
- * (but only if it's not a control endpoint, since we already
|
|
|
|
- * queued the Set TR dequeue pointer command for stalled
|
|
|
|
- * control endpoints).
|
|
|
|
- */
|
|
|
|
- if (usb_endpoint_xfer_control(&urb->ep->desc) ||
|
|
|
|
- (trb_comp_code != COMP_STALL &&
|
|
|
|
- trb_comp_code != COMP_BABBLE)) {
|
|
|
|
- kfree(td);
|
|
|
|
- }
|
|
|
|
- urb->hcpriv = NULL;
|
|
|
|
|
|
+ urb_priv->td_cnt++;
|
|
|
|
+ /* Giveback the urb when all the tds are completed */
|
|
|
|
+ if (urb_priv->td_cnt == urb_priv->length)
|
|
|
|
+ ret = 1;
|
|
}
|
|
}
|
|
-cleanup:
|
|
|
|
- inc_deq(xhci, xhci->event_ring, true);
|
|
|
|
- xhci_set_hc_event_deq(xhci);
|
|
|
|
|
|
|
|
- /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
|
|
|
|
- if (urb) {
|
|
|
|
- usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
|
|
|
|
- xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
|
|
|
|
- urb, urb->actual_length, status);
|
|
|
|
- spin_unlock(&xhci->lock);
|
|
|
|
- usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
|
|
|
|
- spin_lock(&xhci->lock);
|
|
|
|
- }
|
|
|
|
- return 0;
|
|
|
|
|
|
+ return ret;
|
|
}
|
|
}
|
|
|
|
|
|
/*
|
|
/*
|
|
- * This function handles all OS-owned events on the event ring. It may drop
|
|
|
|
- * xhci->lock between event processing (e.g. to pass up port status changes).
|
|
|
|
|
|
+ * Process control tds, update urb status and actual_length.
|
|
*/
|
|
*/
|
|
-void xhci_handle_event(struct xhci_hcd *xhci)
|
|
|
|
|
|
+static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
|
|
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
|
|
|
|
+ struct xhci_virt_ep *ep, int *status)
|
|
{
|
|
{
|
|
- union xhci_trb *event;
|
|
|
|
- int update_ptrs = 1;
|
|
|
|
- int ret;
|
|
|
|
-
|
|
|
|
- xhci_dbg(xhci, "In %s\n", __func__);
|
|
|
|
- if (!xhci->event_ring || !xhci->event_ring->dequeue) {
|
|
|
|
- xhci->error_bitmask |= 1 << 1;
|
|
|
|
- return;
|
|
|
|
- }
|
|
|
|
|
|
+ struct xhci_virt_device *xdev;
|
|
|
|
+ struct xhci_ring *ep_ring;
|
|
|
|
+ unsigned int slot_id;
|
|
|
|
+ int ep_index;
|
|
|
|
+ struct xhci_ep_ctx *ep_ctx;
|
|
|
|
+ u32 trb_comp_code;
|
|
|
|
|
|
- event = xhci->event_ring->dequeue;
|
|
|
|
- /* Does the HC or OS own the TRB? */
|
|
|
|
- if ((event->event_cmd.flags & TRB_CYCLE) !=
|
|
|
|
- xhci->event_ring->cycle_state) {
|
|
|
|
- xhci->error_bitmask |= 1 << 2;
|
|
|
|
- return;
|
|
|
|
- }
|
|
|
|
- xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
|
|
|
|
|
|
+ slot_id = TRB_TO_SLOT_ID(event->flags);
|
|
|
|
+ xdev = xhci->devs[slot_id];
|
|
|
|
+ ep_index = TRB_TO_EP_ID(event->flags) - 1;
|
|
|
|
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
|
|
|
|
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
|
|
|
|
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
|
|
|
|
|
|
- /* FIXME: Handle more event types. */
|
|
|
|
- switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
|
|
|
|
- case TRB_TYPE(TRB_COMPLETION):
|
|
|
|
- xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
|
|
|
|
- handle_cmd_completion(xhci, &event->event_cmd);
|
|
|
|
- xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
|
|
|
|
- break;
|
|
|
|
- case TRB_TYPE(TRB_PORT_STATUS):
|
|
|
|
- xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
|
|
|
|
- handle_port_status(xhci, event);
|
|
|
|
- xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
|
|
|
|
- update_ptrs = 0;
|
|
|
|
|
|
+ xhci_debug_trb(xhci, xhci->event_ring->dequeue);
|
|
|
|
+ switch (trb_comp_code) {
|
|
|
|
+ case COMP_SUCCESS:
|
|
|
|
+ if (event_trb == ep_ring->dequeue) {
|
|
|
|
+ xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
|
|
|
|
+ "without IOC set??\n");
|
|
|
|
+ *status = -ESHUTDOWN;
|
|
|
|
+ } else if (event_trb != td->last_trb) {
|
|
|
|
+ xhci_warn(xhci, "WARN: Success on ctrl data TRB "
|
|
|
|
+ "without IOC set??\n");
|
|
|
|
+ *status = -ESHUTDOWN;
|
|
|
|
+ } else {
|
|
|
|
+ xhci_dbg(xhci, "Successful control transfer!\n");
|
|
|
|
+ *status = 0;
|
|
|
|
+ }
|
|
break;
|
|
break;
|
|
- case TRB_TYPE(TRB_TRANSFER):
|
|
|
|
- xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
|
|
|
|
- ret = handle_tx_event(xhci, &event->trans_event);
|
|
|
|
- xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
|
|
|
|
- if (ret < 0)
|
|
|
|
- xhci->error_bitmask |= 1 << 9;
|
|
|
|
|
|
+ case COMP_SHORT_TX:
|
|
|
|
+ xhci_warn(xhci, "WARN: short transfer on control ep\n");
|
|
|
|
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
|
|
+ *status = -EREMOTEIO;
|
|
else
|
|
else
|
|
- update_ptrs = 0;
|
|
|
|
|
|
+ *status = 0;
|
|
break;
|
|
break;
|
|
default:
|
|
default:
|
|
- if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
|
|
|
|
- handle_vendor_event(xhci, event);
|
|
|
|
|
|
+ if (!xhci_requires_manual_halt_cleanup(xhci,
|
|
|
|
+ ep_ctx, trb_comp_code))
|
|
|
|
+ break;
|
|
|
|
+ xhci_dbg(xhci, "TRB error code %u, "
|
|
|
|
+ "halted endpoint index = %u\n",
|
|
|
|
+ trb_comp_code, ep_index);
|
|
|
|
+ /* else fall through */
|
|
|
|
+ case COMP_STALL:
|
|
|
|
+ /* Did we transfer part of the data (middle) phase? */
|
|
|
|
+ if (event_trb != ep_ring->dequeue &&
|
|
|
|
+ event_trb != td->last_trb)
|
|
|
|
+ td->urb->actual_length =
|
|
|
|
+ td->urb->transfer_buffer_length
|
|
|
|
+ - TRB_LEN(event->transfer_len);
|
|
else
|
|
else
|
|
- xhci->error_bitmask |= 1 << 3;
|
|
|
|
|
|
+ td->urb->actual_length = 0;
|
|
|
|
+
|
|
|
|
+ xhci_cleanup_halted_endpoint(xhci,
|
|
|
|
+ slot_id, ep_index, 0, td, event_trb);
|
|
|
|
+ return finish_td(xhci, td, event_trb, event, ep, status, true);
|
|
}
|
|
}
|
|
- /* Any of the above functions may drop and re-acquire the lock, so check
|
|
|
|
- * to make sure a watchdog timer didn't mark the host as non-responsive.
|
|
|
|
|
|
+ /*
|
|
|
|
+ * Did we transfer any data, despite the errors that might have
|
|
|
|
+ * happened? I.e. did we get past the setup stage?
|
|
*/
|
|
*/
|
|
- if (xhci->xhc_state & XHCI_STATE_DYING) {
|
|
|
|
- xhci_dbg(xhci, "xHCI host dying, returning from "
|
|
|
|
- "event handler.\n");
|
|
|
|
- return;
|
|
|
|
|
|
+ if (event_trb != ep_ring->dequeue) {
|
|
|
|
+ /* The event was for the status stage */
|
|
|
|
+ if (event_trb == td->last_trb) {
|
|
|
|
+ if (td->urb->actual_length != 0) {
|
|
|
|
+ /* Don't overwrite a previously set error code
|
|
|
|
+ */
|
|
|
|
+ if ((*status == -EINPROGRESS || *status == 0) &&
|
|
|
|
+ (td->urb->transfer_flags
|
|
|
|
+ & URB_SHORT_NOT_OK))
|
|
|
|
+ /* Did we already see a short data
|
|
|
|
+ * stage? */
|
|
|
|
+ *status = -EREMOTEIO;
|
|
|
|
+ } else {
|
|
|
|
+ td->urb->actual_length =
|
|
|
|
+ td->urb->transfer_buffer_length;
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ /* Maybe the event was for the data stage? */
|
|
|
|
+ if (trb_comp_code != COMP_STOP_INVAL) {
|
|
|
|
+ /* We didn't stop on a link TRB in the middle */
|
|
|
|
+ td->urb->actual_length =
|
|
|
|
+ td->urb->transfer_buffer_length -
|
|
|
|
+ TRB_LEN(event->transfer_len);
|
|
|
|
+ xhci_dbg(xhci, "Waiting for status "
|
|
|
|
+ "stage event\n");
|
|
|
|
+ return 0;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
}
|
|
}
|
|
|
|
|
|
- if (update_ptrs) {
|
|
|
|
- /* Update SW and HC event ring dequeue pointer */
|
|
|
|
- inc_deq(xhci, xhci->event_ring, true);
|
|
|
|
- xhci_set_hc_event_deq(xhci);
|
|
|
|
|
|
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * Process isochronous tds, update urb packet status and actual_length.
|
|
|
|
+ */
|
|
|
|
+static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
|
|
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
|
|
|
|
+ struct xhci_virt_ep *ep, int *status)
|
|
|
|
+{
|
|
|
|
+ struct xhci_ring *ep_ring;
|
|
|
|
+ struct urb_priv *urb_priv;
|
|
|
|
+ int idx;
|
|
|
|
+ int len = 0;
|
|
|
|
+ int skip_td = 0;
|
|
|
|
+ union xhci_trb *cur_trb;
|
|
|
|
+ struct xhci_segment *cur_seg;
|
|
|
|
+ u32 trb_comp_code;
|
|
|
|
+
|
|
|
|
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
|
|
|
|
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
|
|
|
|
+ urb_priv = td->urb->hcpriv;
|
|
|
|
+ idx = urb_priv->td_cnt;
|
|
|
|
+
|
|
|
|
+ if (ep->skip) {
|
|
|
|
+ /* The transfer is partly done */
|
|
|
|
+ *status = -EXDEV;
|
|
|
|
+ td->urb->iso_frame_desc[idx].status = -EXDEV;
|
|
|
|
+ } else {
|
|
|
|
+ /* handle completion code */
|
|
|
|
+ switch (trb_comp_code) {
|
|
|
|
+ case COMP_SUCCESS:
|
|
|
|
+ td->urb->iso_frame_desc[idx].status = 0;
|
|
|
|
+ xhci_dbg(xhci, "Successful isoc transfer!\n");
|
|
|
|
+ break;
|
|
|
|
+ case COMP_SHORT_TX:
|
|
|
|
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
|
|
+ td->urb->iso_frame_desc[idx].status =
|
|
|
|
+ -EREMOTEIO;
|
|
|
|
+ else
|
|
|
|
+ td->urb->iso_frame_desc[idx].status = 0;
|
|
|
|
+ break;
|
|
|
|
+ case COMP_BW_OVER:
|
|
|
|
+ td->urb->iso_frame_desc[idx].status = -ECOMM;
|
|
|
|
+ skip_td = 1;
|
|
|
|
+ break;
|
|
|
|
+ case COMP_BUFF_OVER:
|
|
|
|
+ case COMP_BABBLE:
|
|
|
|
+ td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
|
|
|
|
+ skip_td = 1;
|
|
|
|
+ break;
|
|
|
|
+ case COMP_STALL:
|
|
|
|
+ td->urb->iso_frame_desc[idx].status = -EPROTO;
|
|
|
|
+ skip_td = 1;
|
|
|
|
+ break;
|
|
|
|
+ case COMP_STOP:
|
|
|
|
+ case COMP_STOP_INVAL:
|
|
|
|
+ break;
|
|
|
|
+ default:
|
|
|
|
+ td->urb->iso_frame_desc[idx].status = -1;
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /* calc actual length */
|
|
|
|
+ if (ep->skip) {
|
|
|
|
+ td->urb->iso_frame_desc[idx].actual_length = 0;
|
|
|
|
+ return finish_td(xhci, td, event_trb, event, ep, status, true);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
|
|
|
|
+ td->urb->iso_frame_desc[idx].actual_length =
|
|
|
|
+ td->urb->iso_frame_desc[idx].length;
|
|
|
|
+ td->urb->actual_length +=
|
|
|
|
+ td->urb->iso_frame_desc[idx].length;
|
|
|
|
+ } else {
|
|
|
|
+ for (cur_trb = ep_ring->dequeue,
|
|
|
|
+ cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
|
|
|
|
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
|
|
|
|
+ if ((cur_trb->generic.field[3] &
|
|
|
|
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
|
|
|
|
+ (cur_trb->generic.field[3] &
|
|
|
|
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
|
|
|
|
+ len +=
|
|
|
|
+ TRB_LEN(cur_trb->generic.field[2]);
|
|
|
|
+ }
|
|
|
|
+ len += TRB_LEN(cur_trb->generic.field[2]) -
|
|
|
|
+ TRB_LEN(event->transfer_len);
|
|
|
|
+
|
|
|
|
+ if (trb_comp_code != COMP_STOP_INVAL) {
|
|
|
|
+ td->urb->iso_frame_desc[idx].actual_length = len;
|
|
|
|
+ td->urb->actual_length += len;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
|
|
|
|
+ *status = 0;
|
|
|
|
+
|
|
|
|
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * Process bulk and interrupt tds, update urb status and actual_length.
|
|
|
|
+ */
|
|
|
|
+static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
|
|
|
|
+ union xhci_trb *event_trb, struct xhci_transfer_event *event,
|
|
|
|
+ struct xhci_virt_ep *ep, int *status)
|
|
|
|
+{
|
|
|
|
+ struct xhci_ring *ep_ring;
|
|
|
|
+ union xhci_trb *cur_trb;
|
|
|
|
+ struct xhci_segment *cur_seg;
|
|
|
|
+ u32 trb_comp_code;
|
|
|
|
+
|
|
|
|
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
|
|
|
|
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
|
|
|
|
+
|
|
|
|
+ switch (trb_comp_code) {
|
|
|
|
+ case COMP_SUCCESS:
|
|
|
|
+ /* Double check that the HW transferred everything. */
|
|
|
|
+ if (event_trb != td->last_trb) {
|
|
|
|
+ xhci_warn(xhci, "WARN Successful completion "
|
|
|
|
+ "on short TX\n");
|
|
|
|
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
|
|
+ *status = -EREMOTEIO;
|
|
|
|
+ else
|
|
|
|
+ *status = 0;
|
|
|
|
+ } else {
|
|
|
|
+ if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
|
|
|
|
+ xhci_dbg(xhci, "Successful bulk "
|
|
|
|
+ "transfer!\n");
|
|
|
|
+ else
|
|
|
|
+ xhci_dbg(xhci, "Successful interrupt "
|
|
|
|
+ "transfer!\n");
|
|
|
|
+ *status = 0;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ case COMP_SHORT_TX:
|
|
|
|
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
|
|
+ *status = -EREMOTEIO;
|
|
|
|
+ else
|
|
|
|
+ *status = 0;
|
|
|
|
+ break;
|
|
|
|
+ default:
|
|
|
|
+ /* Others already handled above */
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ dev_dbg(&td->urb->dev->dev,
|
|
|
|
+ "ep %#x - asked for %d bytes, "
|
|
|
|
+ "%d bytes untransferred\n",
|
|
|
|
+ td->urb->ep->desc.bEndpointAddress,
|
|
|
|
+ td->urb->transfer_buffer_length,
|
|
|
|
+ TRB_LEN(event->transfer_len));
|
|
|
|
+ /* Fast path - was this the last TRB in the TD for this URB? */
|
|
|
|
+ if (event_trb == td->last_trb) {
|
|
|
|
+ if (TRB_LEN(event->transfer_len) != 0) {
|
|
|
|
+ td->urb->actual_length =
|
|
|
|
+ td->urb->transfer_buffer_length -
|
|
|
|
+ TRB_LEN(event->transfer_len);
|
|
|
|
+ if (td->urb->transfer_buffer_length <
|
|
|
|
+ td->urb->actual_length) {
|
|
|
|
+ xhci_warn(xhci, "HC gave bad length "
|
|
|
|
+ "of %d bytes left\n",
|
|
|
|
+ TRB_LEN(event->transfer_len));
|
|
|
|
+ td->urb->actual_length = 0;
|
|
|
|
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
|
|
+ *status = -EREMOTEIO;
|
|
|
|
+ else
|
|
|
|
+ *status = 0;
|
|
|
|
+ }
|
|
|
|
+ /* Don't overwrite a previously set error code */
|
|
|
|
+ if (*status == -EINPROGRESS) {
|
|
|
|
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
|
|
|
|
+ *status = -EREMOTEIO;
|
|
|
|
+ else
|
|
|
|
+ *status = 0;
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ td->urb->actual_length =
|
|
|
|
+ td->urb->transfer_buffer_length;
|
|
|
|
+ /* Ignore a short packet completion if the
|
|
|
|
+ * untransferred length was zero.
|
|
|
|
+ */
|
|
|
|
+ if (*status == -EREMOTEIO)
|
|
|
|
+ *status = 0;
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ /* Slow path - walk the list, starting from the dequeue
|
|
|
|
+ * pointer, to get the actual length transferred.
|
|
|
|
+ */
|
|
|
|
+ td->urb->actual_length = 0;
|
|
|
|
+ for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
|
|
|
|
+ cur_trb != event_trb;
|
|
|
|
+ next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
|
|
|
|
+ if ((cur_trb->generic.field[3] &
|
|
|
|
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
|
|
|
|
+ (cur_trb->generic.field[3] &
|
|
|
|
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
|
|
|
|
+ td->urb->actual_length +=
|
|
|
|
+ TRB_LEN(cur_trb->generic.field[2]);
|
|
|
|
+ }
|
|
|
|
+ /* If the ring didn't stop on a Link or No-op TRB, add
|
|
|
|
+ * in the actual bytes transferred from the Normal TRB
|
|
|
|
+ */
|
|
|
|
+ if (trb_comp_code != COMP_STOP_INVAL)
|
|
|
|
+ td->urb->actual_length +=
|
|
|
|
+ TRB_LEN(cur_trb->generic.field[2]) -
|
|
|
|
+ TRB_LEN(event->transfer_len);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * If this function returns an error condition, it means it got a Transfer
|
|
|
|
+ * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
|
|
|
|
+ * At this point, the host controller is probably hosed and should be reset.
|
|
|
|
+ */
|
|
|
|
+static int handle_tx_event(struct xhci_hcd *xhci,
|
|
|
|
+ struct xhci_transfer_event *event)
|
|
|
|
+{
|
|
|
|
+ struct xhci_virt_device *xdev;
|
|
|
|
+ struct xhci_virt_ep *ep;
|
|
|
|
+ struct xhci_ring *ep_ring;
|
|
|
|
+ unsigned int slot_id;
|
|
|
|
+ int ep_index;
|
|
|
|
+ struct xhci_td *td = NULL;
|
|
|
|
+ dma_addr_t event_dma;
|
|
|
|
+ struct xhci_segment *event_seg;
|
|
|
|
+ union xhci_trb *event_trb;
|
|
|
|
+ struct urb *urb = NULL;
|
|
|
|
+ int status = -EINPROGRESS;
|
|
|
|
+ struct urb_priv *urb_priv;
|
|
|
|
+ struct xhci_ep_ctx *ep_ctx;
|
|
|
|
+ u32 trb_comp_code;
|
|
|
|
+ int ret = 0;
|
|
|
|
+
|
|
|
|
+ slot_id = TRB_TO_SLOT_ID(event->flags);
|
|
|
|
+ xdev = xhci->devs[slot_id];
|
|
|
|
+ if (!xdev) {
|
|
|
|
+ xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
|
|
|
|
+ return -ENODEV;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /* Endpoint ID is 1 based, our index is zero based */
|
|
|
|
+ ep_index = TRB_TO_EP_ID(event->flags) - 1;
|
|
|
|
+ xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
|
|
|
|
+ ep = &xdev->eps[ep_index];
|
|
|
|
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
|
|
|
|
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
|
|
|
|
+ if (!ep_ring ||
|
|
|
|
+ (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
|
|
|
|
+ xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
|
|
|
|
+ "or incorrect stream ring\n");
|
|
|
|
+ return -ENODEV;
|
|
}
|
|
}
|
|
|
|
+
|
|
|
|
+ event_dma = event->buffer;
|
|
|
|
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
|
|
|
|
+ /* Look for common error cases */
|
|
|
|
+ switch (trb_comp_code) {
|
|
|
|
+ /* Skip codes that require special handling depending on
|
|
|
|
+ * transfer type
|
|
|
|
+ */
|
|
|
|
+ case COMP_SUCCESS:
|
|
|
|
+ case COMP_SHORT_TX:
|
|
|
|
+ break;
|
|
|
|
+ case COMP_STOP:
|
|
|
|
+ xhci_dbg(xhci, "Stopped on Transfer TRB\n");
|
|
|
|
+ break;
|
|
|
|
+ case COMP_STOP_INVAL:
|
|
|
|
+ xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
|
|
|
|
+ break;
|
|
|
|
+ case COMP_STALL:
|
|
|
|
+ xhci_warn(xhci, "WARN: Stalled endpoint\n");
|
|
|
|
+ ep->ep_state |= EP_HALTED;
|
|
|
|
+ status = -EPIPE;
|
|
|
|
+ break;
|
|
|
|
+ case COMP_TRB_ERR:
|
|
|
|
+ xhci_warn(xhci, "WARN: TRB error on endpoint\n");
|
|
|
|
+ status = -EILSEQ;
|
|
|
|
+ break;
|
|
|
|
+ case COMP_SPLIT_ERR:
|
|
|
|
+ case COMP_TX_ERR:
|
|
|
|
+ xhci_warn(xhci, "WARN: transfer error on endpoint\n");
|
|
|
|
+ status = -EPROTO;
|
|
|
|
+ break;
|
|
|
|
+ case COMP_BABBLE:
|
|
|
|
+ xhci_warn(xhci, "WARN: babble error on endpoint\n");
|
|
|
|
+ status = -EOVERFLOW;
|
|
|
|
+ break;
|
|
|
|
+ case COMP_DB_ERR:
|
|
|
|
+ xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
|
|
|
|
+ status = -ENOSR;
|
|
|
|
+ break;
|
|
|
|
+ case COMP_BW_OVER:
|
|
|
|
+ xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
|
|
|
|
+ break;
|
|
|
|
+ case COMP_BUFF_OVER:
|
|
|
|
+ xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
|
|
|
|
+ break;
|
|
|
|
+ case COMP_UNDERRUN:
|
|
|
|
+ /*
|
|
|
|
+ * When the Isoch ring is empty, the xHC will generate
|
|
|
|
+ * a Ring Overrun Event for IN Isoch endpoint or Ring
|
|
|
|
+ * Underrun Event for OUT Isoch endpoint.
|
|
|
|
+ */
|
|
|
|
+ xhci_dbg(xhci, "underrun event on endpoint\n");
|
|
|
|
+ if (!list_empty(&ep_ring->td_list))
|
|
|
|
+ xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
|
|
|
|
+ "still with TDs queued?\n",
|
|
|
|
+ TRB_TO_SLOT_ID(event->flags), ep_index);
|
|
|
|
+ goto cleanup;
|
|
|
|
+ case COMP_OVERRUN:
|
|
|
|
+ xhci_dbg(xhci, "overrun event on endpoint\n");
|
|
|
|
+ if (!list_empty(&ep_ring->td_list))
|
|
|
|
+ xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
|
|
|
|
+ "still with TDs queued?\n",
|
|
|
|
+ TRB_TO_SLOT_ID(event->flags), ep_index);
|
|
|
|
+ goto cleanup;
|
|
|
|
+ case COMP_MISSED_INT:
|
|
|
|
+ /*
|
|
|
|
+ * When encounter missed service error, one or more isoc tds
|
|
|
|
+ * may be missed by xHC.
|
|
|
|
+ * Set skip flag of the ep_ring; Complete the missed tds as
|
|
|
|
+ * short transfer when process the ep_ring next time.
|
|
|
|
+ */
|
|
|
|
+ ep->skip = true;
|
|
|
|
+ xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
|
|
|
|
+ goto cleanup;
|
|
|
|
+ default:
|
|
|
|
+ if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
|
|
|
|
+ status = 0;
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
|
|
|
|
+ "busted\n");
|
|
|
|
+ goto cleanup;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ do {
|
|
|
|
+ /* This TRB should be in the TD at the head of this ring's
|
|
|
|
+ * TD list.
|
|
|
|
+ */
|
|
|
|
+ if (list_empty(&ep_ring->td_list)) {
|
|
|
|
+ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
|
|
|
|
+ "with no TDs queued?\n",
|
|
|
|
+ TRB_TO_SLOT_ID(event->flags), ep_index);
|
|
|
|
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
|
|
|
|
+ (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
|
|
|
|
+ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
|
|
|
|
+ if (ep->skip) {
|
|
|
|
+ ep->skip = false;
|
|
|
|
+ xhci_dbg(xhci, "td_list is empty while skip "
|
|
|
|
+ "flag set. Clear skip flag.\n");
|
|
|
|
+ }
|
|
|
|
+ ret = 0;
|
|
|
|
+ goto cleanup;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
|
|
|
|
+ /* Is this a TRB in the currently executing TD? */
|
|
|
|
+ event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
|
|
|
|
+ td->last_trb, event_dma);
|
|
|
|
+ if (event_seg && ep->skip) {
|
|
|
|
+ xhci_dbg(xhci, "Found td. Clear skip flag.\n");
|
|
|
|
+ ep->skip = false;
|
|
|
|
+ }
|
|
|
|
+ if (!event_seg &&
|
|
|
|
+ (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
|
|
|
|
+ /* HC is busted, give up! */
|
|
|
|
+ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
|
|
|
|
+ "part of current TD\n");
|
|
|
|
+ return -ESHUTDOWN;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (event_seg) {
|
|
|
|
+ event_trb = &event_seg->trbs[(event_dma -
|
|
|
|
+ event_seg->dma) / sizeof(*event_trb)];
|
|
|
|
+ /*
|
|
|
|
+ * No-op TRB should not trigger interrupts.
|
|
|
|
+ * If event_trb is a no-op TRB, it means the
|
|
|
|
+ * corresponding TD has been cancelled. Just ignore
|
|
|
|
+ * the TD.
|
|
|
|
+ */
|
|
|
|
+ if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
|
|
|
|
+ == TRB_TYPE(TRB_TR_NOOP)) {
|
|
|
|
+ xhci_dbg(xhci, "event_trb is a no-op TRB. "
|
|
|
|
+ "Skip it\n");
|
|
|
|
+ goto cleanup;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /* Now update the urb's actual_length and give back to
|
|
|
|
+ * the core
|
|
|
|
+ */
|
|
|
|
+ if (usb_endpoint_xfer_control(&td->urb->ep->desc))
|
|
|
|
+ ret = process_ctrl_td(xhci, td, event_trb, event, ep,
|
|
|
|
+ &status);
|
|
|
|
+ else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
|
|
|
|
+ ret = process_isoc_td(xhci, td, event_trb, event, ep,
|
|
|
|
+ &status);
|
|
|
|
+ else
|
|
|
|
+ ret = process_bulk_intr_td(xhci, td, event_trb, event,
|
|
|
|
+ ep, &status);
|
|
|
|
+
|
|
|
|
+cleanup:
|
|
|
|
+ /*
|
|
|
|
+ * Do not update event ring dequeue pointer if ep->skip is set.
|
|
|
|
+ * Will roll back to continue process missed tds.
|
|
|
|
+ */
|
|
|
|
+ if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
|
|
|
|
+ inc_deq(xhci, xhci->event_ring, true);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (ret) {
|
|
|
|
+ urb = td->urb;
|
|
|
|
+ urb_priv = urb->hcpriv;
|
|
|
|
+ /* Leave the TD around for the reset endpoint function
|
|
|
|
+ * to use(but only if it's not a control endpoint,
|
|
|
|
+ * since we already queued the Set TR dequeue pointer
|
|
|
|
+ * command for stalled control endpoints).
|
|
|
|
+ */
|
|
|
|
+ if (usb_endpoint_xfer_control(&urb->ep->desc) ||
|
|
|
|
+ (trb_comp_code != COMP_STALL &&
|
|
|
|
+ trb_comp_code != COMP_BABBLE))
|
|
|
|
+ xhci_urb_free_priv(xhci, urb_priv);
|
|
|
|
+
|
|
|
|
+ usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
|
|
|
|
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, "
|
|
|
|
+ "status = %d\n",
|
|
|
|
+ urb, urb->actual_length, status);
|
|
|
|
+ spin_unlock(&xhci->lock);
|
|
|
|
+ usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
|
|
|
|
+ spin_lock(&xhci->lock);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * If ep->skip is set, it means there are missed tds on the
|
|
|
|
+ * endpoint ring need to take care of.
|
|
|
|
+ * Process them as short transfer until reach the td pointed by
|
|
|
|
+ * the event.
|
|
|
|
+ */
|
|
|
|
+ } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
|
|
|
|
+
|
|
|
|
+ return 0;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * This function handles all OS-owned events on the event ring. It may drop
|
|
|
|
+ * xhci->lock between event processing (e.g. to pass up port status changes).
|
|
|
|
+ */
|
|
|
|
+static void xhci_handle_event(struct xhci_hcd *xhci)
|
|
|
|
+{
|
|
|
|
+ union xhci_trb *event;
|
|
|
|
+ int update_ptrs = 1;
|
|
|
|
+ int ret;
|
|
|
|
+
|
|
|
|
+ xhci_dbg(xhci, "In %s\n", __func__);
|
|
|
|
+ if (!xhci->event_ring || !xhci->event_ring->dequeue) {
|
|
|
|
+ xhci->error_bitmask |= 1 << 1;
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ event = xhci->event_ring->dequeue;
|
|
|
|
+ /* Does the HC or OS own the TRB? */
|
|
|
|
+ if ((event->event_cmd.flags & TRB_CYCLE) !=
|
|
|
|
+ xhci->event_ring->cycle_state) {
|
|
|
|
+ xhci->error_bitmask |= 1 << 2;
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
|
|
|
|
+
|
|
|
|
+ /* FIXME: Handle more event types. */
|
|
|
|
+ switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
|
|
|
|
+ case TRB_TYPE(TRB_COMPLETION):
|
|
|
|
+ xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
|
|
|
|
+ handle_cmd_completion(xhci, &event->event_cmd);
|
|
|
|
+ xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
|
|
|
|
+ break;
|
|
|
|
+ case TRB_TYPE(TRB_PORT_STATUS):
|
|
|
|
+ xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
|
|
|
|
+ handle_port_status(xhci, event);
|
|
|
|
+ xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
|
|
|
|
+ update_ptrs = 0;
|
|
|
|
+ break;
|
|
|
|
+ case TRB_TYPE(TRB_TRANSFER):
|
|
|
|
+ xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
|
|
|
|
+ ret = handle_tx_event(xhci, &event->trans_event);
|
|
|
|
+ xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
|
|
|
|
+ if (ret < 0)
|
|
|
|
+ xhci->error_bitmask |= 1 << 9;
|
|
|
|
+ else
|
|
|
|
+ update_ptrs = 0;
|
|
|
|
+ break;
|
|
|
|
+ default:
|
|
|
|
+ if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
|
|
|
|
+ handle_vendor_event(xhci, event);
|
|
|
|
+ else
|
|
|
|
+ xhci->error_bitmask |= 1 << 3;
|
|
|
|
+ }
|
|
|
|
+ /* Any of the above functions may drop and re-acquire the lock, so check
|
|
|
|
+ * to make sure a watchdog timer didn't mark the host as non-responsive.
|
|
|
|
+ */
|
|
|
|
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
|
|
|
|
+ xhci_dbg(xhci, "xHCI host dying, returning from "
|
|
|
|
+ "event handler.\n");
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (update_ptrs)
|
|
|
|
+ /* Update SW event ring dequeue pointer */
|
|
|
|
+ inc_deq(xhci, xhci->event_ring, true);
|
|
|
|
+
|
|
/* Are there more items on the event ring? */
|
|
/* Are there more items on the event ring? */
|
|
xhci_handle_event(xhci);
|
|
xhci_handle_event(xhci);
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+/*
|
|
|
|
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
|
|
|
|
+ * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
|
|
|
|
+ * indicators of an event TRB error, but we check the status *first* to be safe.
|
|
|
|
+ */
|
|
|
|
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
|
|
|
|
+{
|
|
|
|
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
|
|
|
|
+ u32 status;
|
|
|
|
+ union xhci_trb *trb;
|
|
|
|
+ u64 temp_64;
|
|
|
|
+ union xhci_trb *event_ring_deq;
|
|
|
|
+ dma_addr_t deq;
|
|
|
|
+
|
|
|
|
+ spin_lock(&xhci->lock);
|
|
|
|
+ trb = xhci->event_ring->dequeue;
|
|
|
|
+ /* Check if the xHC generated the interrupt, or the irq is shared */
|
|
|
|
+ status = xhci_readl(xhci, &xhci->op_regs->status);
|
|
|
|
+ if (status == 0xffffffff)
|
|
|
|
+ goto hw_died;
|
|
|
|
+
|
|
|
|
+ if (!(status & STS_EINT)) {
|
|
|
|
+ spin_unlock(&xhci->lock);
|
|
|
|
+ xhci_warn(xhci, "Spurious interrupt.\n");
|
|
|
|
+ return IRQ_NONE;
|
|
|
|
+ }
|
|
|
|
+ xhci_dbg(xhci, "op reg status = %08x\n", status);
|
|
|
|
+ xhci_dbg(xhci, "Event ring dequeue ptr:\n");
|
|
|
|
+ xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
|
|
|
|
+ (unsigned long long)
|
|
|
|
+ xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
|
|
|
|
+ lower_32_bits(trb->link.segment_ptr),
|
|
|
|
+ upper_32_bits(trb->link.segment_ptr),
|
|
|
|
+ (unsigned int) trb->link.intr_target,
|
|
|
|
+ (unsigned int) trb->link.control);
|
|
|
|
+
|
|
|
|
+ if (status & STS_FATAL) {
|
|
|
|
+ xhci_warn(xhci, "WARNING: Host System Error\n");
|
|
|
|
+ xhci_halt(xhci);
|
|
|
|
+hw_died:
|
|
|
|
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
|
|
|
|
+ spin_unlock(&xhci->lock);
|
|
|
|
+ return -ESHUTDOWN;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * Clear the op reg interrupt status first,
|
|
|
|
+ * so we can receive interrupts from other MSI-X interrupters.
|
|
|
|
+ * Write 1 to clear the interrupt status.
|
|
|
|
+ */
|
|
|
|
+ status |= STS_EINT;
|
|
|
|
+ xhci_writel(xhci, status, &xhci->op_regs->status);
|
|
|
|
+ /* FIXME when MSI-X is supported and there are multiple vectors */
|
|
|
|
+ /* Clear the MSI-X event interrupt status */
|
|
|
|
+
|
|
|
|
+ if (hcd->irq != -1) {
|
|
|
|
+ u32 irq_pending;
|
|
|
|
+ /* Acknowledge the PCI interrupt */
|
|
|
|
+ irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
|
|
|
|
+ irq_pending |= 0x3;
|
|
|
|
+ xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
|
|
|
|
+ xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
|
|
|
|
+ "Shouldn't IRQs be disabled?\n");
|
|
|
|
+ /* Clear the event handler busy flag (RW1C);
|
|
|
|
+ * the event ring should be empty.
|
|
|
|
+ */
|
|
|
|
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
|
|
|
|
+ xhci_write_64(xhci, temp_64 | ERST_EHB,
|
|
|
|
+ &xhci->ir_set->erst_dequeue);
|
|
|
|
+ spin_unlock(&xhci->lock);
|
|
|
|
+
|
|
|
|
+ return IRQ_HANDLED;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ event_ring_deq = xhci->event_ring->dequeue;
|
|
|
|
+ /* FIXME this should be a delayed service routine
|
|
|
|
+ * that clears the EHB.
|
|
|
|
+ */
|
|
|
|
+ xhci_handle_event(xhci);
|
|
|
|
+
|
|
|
|
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
|
|
|
|
+ /* If necessary, update the HW's version of the event ring deq ptr. */
|
|
|
|
+ if (event_ring_deq != xhci->event_ring->dequeue) {
|
|
|
|
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
|
|
|
|
+ xhci->event_ring->dequeue);
|
|
|
|
+ if (deq == 0)
|
|
|
|
+ xhci_warn(xhci, "WARN something wrong with SW event "
|
|
|
|
+ "ring dequeue ptr.\n");
|
|
|
|
+ /* Update HC event ring dequeue pointer */
|
|
|
|
+ temp_64 &= ERST_PTR_MASK;
|
|
|
|
+ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /* Clear the event handler busy flag (RW1C); event ring is empty. */
|
|
|
|
+ temp_64 |= ERST_EHB;
|
|
|
|
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
|
|
|
|
+
|
|
|
|
+ spin_unlock(&xhci->lock);
|
|
|
|
+
|
|
|
|
+ return IRQ_HANDLED;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
|
|
|
|
+{
|
|
|
|
+ irqreturn_t ret;
|
|
|
|
+
|
|
|
|
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
|
|
|
|
+
|
|
|
|
+ ret = xhci_irq(hcd);
|
|
|
|
+
|
|
|
|
+ return ret;
|
|
|
|
+}
|
|
|
|
+
|
|
/**** Endpoint Ring Operations ****/
|
|
/**** Endpoint Ring Operations ****/
|
|
|
|
|
|
/*
|
|
/*
|
|
@@ -1827,10 +2225,12 @@ static int prepare_transfer(struct xhci_hcd *xhci,
|
|
unsigned int stream_id,
|
|
unsigned int stream_id,
|
|
unsigned int num_trbs,
|
|
unsigned int num_trbs,
|
|
struct urb *urb,
|
|
struct urb *urb,
|
|
- struct xhci_td **td,
|
|
|
|
|
|
+ unsigned int td_index,
|
|
gfp_t mem_flags)
|
|
gfp_t mem_flags)
|
|
{
|
|
{
|
|
int ret;
|
|
int ret;
|
|
|
|
+ struct urb_priv *urb_priv;
|
|
|
|
+ struct xhci_td *td;
|
|
struct xhci_ring *ep_ring;
|
|
struct xhci_ring *ep_ring;
|
|
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
|
|
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
|
|
|
|
|
|
@@ -1846,24 +2246,29 @@ static int prepare_transfer(struct xhci_hcd *xhci,
|
|
num_trbs, mem_flags);
|
|
num_trbs, mem_flags);
|
|
if (ret)
|
|
if (ret)
|
|
return ret;
|
|
return ret;
|
|
- *td = kzalloc(sizeof(struct xhci_td), mem_flags);
|
|
|
|
- if (!*td)
|
|
|
|
- return -ENOMEM;
|
|
|
|
- INIT_LIST_HEAD(&(*td)->td_list);
|
|
|
|
- INIT_LIST_HEAD(&(*td)->cancelled_td_list);
|
|
|
|
|
|
|
|
- ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
|
|
|
|
- if (unlikely(ret)) {
|
|
|
|
- kfree(*td);
|
|
|
|
- return ret;
|
|
|
|
|
|
+ urb_priv = urb->hcpriv;
|
|
|
|
+ td = urb_priv->td[td_index];
|
|
|
|
+
|
|
|
|
+ INIT_LIST_HEAD(&td->td_list);
|
|
|
|
+ INIT_LIST_HEAD(&td->cancelled_td_list);
|
|
|
|
+
|
|
|
|
+ if (td_index == 0) {
|
|
|
|
+ ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
|
|
|
|
+ if (unlikely(ret)) {
|
|
|
|
+ xhci_urb_free_priv(xhci, urb_priv);
|
|
|
|
+ urb->hcpriv = NULL;
|
|
|
|
+ return ret;
|
|
|
|
+ }
|
|
}
|
|
}
|
|
|
|
|
|
- (*td)->urb = urb;
|
|
|
|
- urb->hcpriv = (void *) (*td);
|
|
|
|
|
|
+ td->urb = urb;
|
|
/* Add this TD to the tail of the endpoint ring's TD list */
|
|
/* Add this TD to the tail of the endpoint ring's TD list */
|
|
- list_add_tail(&(*td)->td_list, &ep_ring->td_list);
|
|
|
|
- (*td)->start_seg = ep_ring->enq_seg;
|
|
|
|
- (*td)->first_trb = ep_ring->enqueue;
|
|
|
|
|
|
+ list_add_tail(&td->td_list, &ep_ring->td_list);
|
|
|
|
+ td->start_seg = ep_ring->enq_seg;
|
|
|
|
+ td->first_trb = ep_ring->enqueue;
|
|
|
|
+
|
|
|
|
+ urb_priv->td[td_index] = td;
|
|
|
|
|
|
return 0;
|
|
return 0;
|
|
}
|
|
}
|
|
@@ -2002,6 +2407,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
{
|
|
{
|
|
struct xhci_ring *ep_ring;
|
|
struct xhci_ring *ep_ring;
|
|
unsigned int num_trbs;
|
|
unsigned int num_trbs;
|
|
|
|
+ struct urb_priv *urb_priv;
|
|
struct xhci_td *td;
|
|
struct xhci_td *td;
|
|
struct scatterlist *sg;
|
|
struct scatterlist *sg;
|
|
int num_sgs;
|
|
int num_sgs;
|
|
@@ -2022,9 +2428,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
|
|
|
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
|
|
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
|
|
ep_index, urb->stream_id,
|
|
ep_index, urb->stream_id,
|
|
- num_trbs, urb, &td, mem_flags);
|
|
|
|
|
|
+ num_trbs, urb, 0, mem_flags);
|
|
if (trb_buff_len < 0)
|
|
if (trb_buff_len < 0)
|
|
return trb_buff_len;
|
|
return trb_buff_len;
|
|
|
|
+
|
|
|
|
+ urb_priv = urb->hcpriv;
|
|
|
|
+ td = urb_priv->td[0];
|
|
|
|
+
|
|
/*
|
|
/*
|
|
* Don't give the first TRB to the hardware (by toggling the cycle bit)
|
|
* Don't give the first TRB to the hardware (by toggling the cycle bit)
|
|
* until we've finished creating all the other TRBs. The ring's cycle
|
|
* until we've finished creating all the other TRBs. The ring's cycle
|
|
@@ -2145,6 +2555,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
 	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	int num_trbs;
 	struct xhci_generic_trb *start_trb;
@@ -2190,10 +2601,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, &td, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
 
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs. The ring's cycle
@@ -2279,6 +2693,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
 	u32 field, length_field;
+	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 
 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -2306,10 +2721,13 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		num_trbs++;
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, &td, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
 
+	urb_priv = urb->hcpriv;
+	td = urb_priv->td[0];
+
 	/*
 	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
 	 * until we've finished creating all the other TRBs. The ring's cycle
@@ -2366,6 +2784,224 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	return 0;
 }
 
+static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
+		struct urb *urb, int i)
+{
+	int num_trbs = 0;
+	u64 addr, td_len, running_total;
+
+	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+	td_len = urb->iso_frame_desc[i].length;
+
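+	/*
+	 * TRB buffers may not cross a 64KB boundary (TRB_MAX_BUFF_SIZE), so
+	 * the first TRB only covers the bytes up to the next boundary.
+	 * E.g. a 12KB frame whose buffer starts 1KB below a boundary needs
+	 * one TRB for that 1KB plus one more for the remaining 11KB.
+	 */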
+	running_total = TRB_MAX_BUFF_SIZE -
+			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+	if (running_total != 0)
+		num_trbs++;
+
+	while (running_total < td_len) {
+		num_trbs++;
+		running_total += TRB_MAX_BUFF_SIZE;
+	}
+
+	return num_trbs;
+}
+
+/* Queue TRBs for an isochronous transfer */
+static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	struct xhci_td *td;
+	int num_tds, trbs_per_td;
+	struct xhci_generic_trb *start_trb;
+	bool first_trb;
+	int start_cycle;
+	u32 field, length_field;
+	int running_total, trb_buff_len, td_len, td_remain_len, ret;
+	u64 start_addr, addr;
+	int i, j;
+
+	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+
+	num_tds = urb->number_of_packets;
+	if (num_tds < 1) {
+		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
+		return -EINVAL;
+	}
+
+	if (!in_interrupt())
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+				" addr = %#llx, num_tds = %d\n",
+				urb->ep->desc.bEndpointAddress,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length,
+				(unsigned long long)urb->transfer_dma,
+				num_tds);
+
+	start_addr = (u64) urb->transfer_dma;
+	start_trb = &ep_ring->enqueue->generic;
+	start_cycle = ep_ring->cycle_state;
+
+	/* Queue the first TRB, even if it's zero-length */
+	for (i = 0; i < num_tds; i++) {
+		first_trb = true;
+
+		running_total = 0;
+		addr = start_addr + urb->iso_frame_desc[i].offset;
+		td_len = urb->iso_frame_desc[i].length;
+		td_remain_len = td_len;
+
+		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
+
+		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+				urb->stream_id, trbs_per_td, urb, i, mem_flags);
+		if (ret < 0)
+			return ret;
+
+		urb_priv = urb->hcpriv;
+		td = urb_priv->td[i];
+
+		for (j = 0; j < trbs_per_td; j++) {
+			u32 remainder = 0;
+			field = 0;
+
+			if (first_trb) {
+				/* Queue the isoc TRB */
+				field |= TRB_TYPE(TRB_ISOC);
+				/* Assume URB_ISO_ASAP is set */
+				field |= TRB_SIA;
+				if (i > 0)
+					field |= ep_ring->cycle_state;
+				first_trb = false;
+			} else {
+				/* Queue other normal TRBs */
+				field |= TRB_TYPE(TRB_NORMAL);
+				field |= ep_ring->cycle_state;
+			}
+
+			/* Chain all the TRBs together; clear the chain bit in
+			 * the last TRB to indicate it's the last TRB in the
+			 * chain.
+			 */
+			if (j < trbs_per_td - 1) {
+				field |= TRB_CHAIN;
+			} else {
+				td->last_trb = ep_ring->enqueue;
+				field |= TRB_IOC;
+			}
+
+			/* Calculate TRB length */
+			trb_buff_len = TRB_MAX_BUFF_SIZE -
+				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+			if (trb_buff_len > td_remain_len)
+				trb_buff_len = td_remain_len;
+
+			remainder = xhci_td_remainder(td_len - running_total);
+			length_field = TRB_LEN(trb_buff_len) |
+				remainder |
+				TRB_INTR_TARGET(0);
+			queue_trb(xhci, ep_ring, false, false,
+				lower_32_bits(addr),
+				upper_32_bits(addr),
+				length_field,
+				/* We always want to know if the TRB was short,
+				 * or we won't get an event when it completes.
+				 * (Unless we use event data TRBs, which are a
+				 * waste of space and HC resources.)
+				 */
+				field | TRB_ISP);
+			running_total += trb_buff_len;
+
+			addr += trb_buff_len;
+			td_remain_len -= trb_buff_len;
+		}
+
+		/* Check TD length */
+		if (running_total != td_len) {
+			xhci_err(xhci, "ISOC TD length mismatch\n");
+			return -EINVAL;
+		}
+	}
+
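+	/*
+	 * Now that every TRB of the URB is queued, hand the chain to the
+	 * hardware: the wmb() makes the TRB writes visible before the first
+	 * TRB's cycle bit is set to the ring's cycle state below.
+	 */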
+	wmb();
+	start_trb->field[3] |= start_cycle;
+
+	ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+	return 0;
+}
+
+/*
+ * Check the transfer ring to guarantee there is enough room for the whole URB,
+ * then update the isoc URB's start_frame and interval.
+ * The interval is adjusted the same way xhci_queue_intr_tx does it; for now,
+ * the current xHCI frame index is simply used as urb->start_frame.
+ * URB_ISO_ASAP is always assumed, and urb->start_frame is NEVER used as input.
+ */
+int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
+		struct urb *urb, int slot_id, unsigned int ep_index)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	struct xhci_ep_ctx *ep_ctx;
+	int start_frame;
+	int xhci_interval;
+	int ep_interval;
+	int num_tds, num_trbs, i;
+	int ret;
+
+	xdev = xhci->devs[slot_id];
+	ep_ring = xdev->eps[ep_index].ring;
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+	num_trbs = 0;
+	num_tds = urb->number_of_packets;
+	for (i = 0; i < num_tds; i++)
+		num_trbs += count_isoc_trbs_needed(xhci, urb, i);
+
+	/* Check the ring to guarantee there is enough room for the whole URB.
+	 * Do not insert any TD of the URB into the ring if the check fails.
+	 */
+	ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
+			num_trbs, mem_flags);
+	if (ret)
+		return ret;
+
+	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+	start_frame &= 0x3fff;
+
+	urb->start_frame = start_frame;
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		urb->start_frame >>= 3;
+
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+	ep_interval = urb->interval;
+	/* Convert to microframes */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		ep_interval *= 8;
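+	/*
+	 * e.g. a full-speed URB with urb->interval = 4 frames is compared
+	 * against the endpoint context interval as 32 microframes here.
+	 */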
+	/* FIXME change this to a warning and a suggestion to use the new API
+	 * to set the polling interval (once the API is added).
+	 */
+	if (xhci_interval != ep_interval) {
+		if (printk_ratelimit())
+			dev_dbg(&urb->dev->dev, "Driver uses different interval"
+					" (%d microframe%s) than xHCI "
+					"(%d microframe%s)\n",
+					ep_interval,
+					ep_interval == 1 ? "" : "s",
+					xhci_interval,
+					xhci_interval == 1 ? "" : "s");
+		urb->interval = xhci_interval;
+		/* Convert back to frames for LS/FS devices */
+		if (urb->dev->speed == USB_SPEED_LOW ||
+				urb->dev->speed == USB_SPEED_FULL)
+			urb->interval /= 8;
+	}
+	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+}
+
 /**** Command Ring Operations ****/
 
 /* Generic function for queueing a command TRB on the command ring.
|