|
|
@@ -3555,6 +3555,97 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Calculates the Frame ID field of an isochronous TRB, which identifies the
|
|
|
+ * target frame that the Interval associated with this Isochronous
|
|
|
+ * Transfer Descriptor will start on. Refer to 4.11.2.5 in 1.1 spec.
|
|
|
+ *
|
|
|
+ * Returns actual frame id on success, negative value on error.
|
|
|
+ */
|
|
|
+static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
|
|
|
+ struct urb *urb, int index)
|
|
|
+{
|
|
|
+ int start_frame, ist, ret = 0;
|
|
|
+ int start_frame_id, end_frame_id, current_frame_id;
|
|
|
+
|
|
|
+ if (urb->dev->speed == USB_SPEED_LOW ||
|
|
|
+ urb->dev->speed == USB_SPEED_FULL)
|
|
|
+ start_frame = urb->start_frame + index * urb->interval;
|
|
|
+ else
|
|
|
+ start_frame = (urb->start_frame + index * urb->interval) >> 3;
|
|
|
+
|
|
|
+ /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
|
|
|
+ *
|
|
|
+ * If bit [3] of IST is cleared to '0', software can add a TRB no
|
|
|
+ * later than IST[2:0] Microframes before that TRB is scheduled to
|
|
|
+ * be executed.
|
|
|
+ * If bit [3] of IST is set to '1', software can add a TRB no later
|
|
|
+ * than IST[2:0] Frames before that TRB is scheduled to be executed.
|
|
|
+ */
|
|
|
+ ist = HCS_IST(xhci->hcs_params2) & 0x7;
|
|
|
+ if (HCS_IST(xhci->hcs_params2) & (1 << 3))
|
|
|
+ ist <<= 3;
|
|
|
+
|
|
|
+ /* Software shall not schedule an Isoch TD with a Frame ID value that
|
|
|
+ * is less than the Start Frame ID or greater than the End Frame ID,
|
|
|
+ * where:
|
|
|
+ *
|
|
|
+ * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
|
|
|
+ * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
|
|
|
+ *
|
|
|
+ * Both the End Frame ID and Start Frame ID values are calculated
|
|
|
+ * in microframes. When software determines the valid Frame ID value,
|
|
|
+ * The End Frame ID value should be rounded down to the nearest Frame
|
|
|
+ * boundary, and the Start Frame ID value should be rounded up to the
|
|
|
+ * nearest Frame boundary.
|
|
|
+ */
|
|
|
+ current_frame_id = readl(&xhci->run_regs->microframe_index);
|
|
|
+ start_frame_id = roundup(current_frame_id + ist + 1, 8);
|
|
|
+ end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
|
|
|
+
|
|
|
+ start_frame &= 0x7ff;
|
|
|
+ start_frame_id = (start_frame_id >> 3) & 0x7ff;
|
|
|
+ end_frame_id = (end_frame_id >> 3) & 0x7ff;
|
|
|
+
|
|
|
+ xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
|
|
|
+ __func__, index, readl(&xhci->run_regs->microframe_index),
|
|
|
+ start_frame_id, end_frame_id, start_frame);
|
|
|
+
|
|
|
+ if (start_frame_id < end_frame_id) {
|
|
|
+ if (start_frame > end_frame_id ||
|
|
|
+ start_frame < start_frame_id)
|
|
|
+ ret = -EINVAL;
|
|
|
+ } else if (start_frame_id > end_frame_id) {
|
|
|
+ if ((start_frame > end_frame_id &&
|
|
|
+ start_frame < start_frame_id))
|
|
|
+ ret = -EINVAL;
|
|
|
+ } else {
|
|
|
+ ret = -EINVAL;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (index == 0) {
|
|
|
+ if (ret == -EINVAL || start_frame == start_frame_id) {
|
|
|
+ start_frame = start_frame_id + 1;
|
|
|
+ if (urb->dev->speed == USB_SPEED_LOW ||
|
|
|
+ urb->dev->speed == USB_SPEED_FULL)
|
|
|
+ urb->start_frame = start_frame;
|
|
|
+ else
|
|
|
+ urb->start_frame = start_frame << 3;
|
|
|
+ ret = 0;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if (ret) {
|
|
|
+ xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
|
|
|
+ start_frame, current_frame_id, index,
|
|
|
+ start_frame_id, end_frame_id);
|
|
|
+ xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
+
|
|
|
+ return start_frame;
|
|
|
+}
|
|
|
+
|
|
|
/* This is for isoc transfer */
|
|
|
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
struct urb *urb, int slot_id, unsigned int ep_index)
|
|
|
@@ -3571,7 +3662,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
u64 start_addr, addr;
|
|
|
int i, j;
|
|
|
bool more_trbs_coming;
|
|
|
+ struct xhci_virt_ep *xep;
|
|
|
|
|
|
+ xep = &xhci->devs[slot_id]->eps[ep_index];
|
|
|
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
|
|
|
|
|
|
num_tds = urb->number_of_packets;
|
|
|
@@ -3619,6 +3712,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
|
|
|
td = urb_priv->td[i];
|
|
|
for (j = 0; j < trbs_per_td; j++) {
|
|
|
+ int frame_id = 0;
|
|
|
u32 remainder = 0;
|
|
|
field = 0;
|
|
|
|
|
|
@@ -3627,8 +3721,20 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
TRB_TLBPC(residue);
|
|
|
/* Queue the isoc TRB */
|
|
|
field |= TRB_TYPE(TRB_ISOC);
|
|
|
- /* Assume URB_ISO_ASAP is set */
|
|
|
- field |= TRB_SIA;
|
|
|
+
|
|
|
+ /* Calculate Frame ID and SIA fields */
|
|
|
+ if (!(urb->transfer_flags & URB_ISO_ASAP) &&
|
|
|
+ HCC_CFC(xhci->hcc_params)) {
|
|
|
+ frame_id = xhci_get_isoc_frame_id(xhci,
|
|
|
+ urb,
|
|
|
+ i);
|
|
|
+ if (frame_id >= 0)
|
|
|
+ field |= TRB_FRAME_ID(frame_id);
|
|
|
+ else
|
|
|
+ field |= TRB_SIA;
|
|
|
+ } else
|
|
|
+ field |= TRB_SIA;
|
|
|
+
|
|
|
if (i == 0) {
|
|
|
if (start_cycle == 0)
|
|
|
field |= 0x1;
|
|
|
@@ -3704,6 +3810,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+ /* store the next frame id */
|
|
|
+ if (HCC_CFC(xhci->hcc_params))
|
|
|
+ xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
|
|
|
+
|
|
|
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
|
|
|
if (xhci->quirks & XHCI_AMD_PLL_FIX)
|
|
|
usb_amd_quirk_pll_disable();
|
|
|
@@ -3737,12 +3847,34 @@ cleanup:
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
+static int ep_ring_is_processing(struct xhci_hcd *xhci,
|
|
|
+ int slot_id, unsigned int ep_index)
|
|
|
+{
|
|
|
+ struct xhci_virt_device *xdev;
|
|
|
+ struct xhci_ring *ep_ring;
|
|
|
+ struct xhci_ep_ctx *ep_ctx;
|
|
|
+ struct xhci_virt_ep *xep;
|
|
|
+ dma_addr_t hw_deq;
|
|
|
+
|
|
|
+ xdev = xhci->devs[slot_id];
|
|
|
+ xep = &xhci->devs[slot_id]->eps[ep_index];
|
|
|
+ ep_ring = xep->ring;
|
|
|
+ ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
|
|
|
+
|
|
|
+ if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
|
|
|
+ return (hw_deq !=
|
|
|
+ xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue));
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Check transfer ring to guarantee there is enough room for the urb.
|
|
|
* Update ISO URB start_frame and interval.
|
|
|
- * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to
|
|
|
- * update the urb->start_frame by now.
|
|
|
- * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
|
|
|
+ * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
|
|
|
+ * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
|
|
|
+ * Contiguous Frame ID is not supported by HC.
|
|
|
*/
|
|
|
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
struct urb *urb, int slot_id, unsigned int ep_index)
|
|
|
@@ -3755,8 +3887,11 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
int ep_interval;
|
|
|
int num_tds, num_trbs, i;
|
|
|
int ret;
|
|
|
+ struct xhci_virt_ep *xep;
|
|
|
+ int ist;
|
|
|
|
|
|
xdev = xhci->devs[slot_id];
|
|
|
+ xep = &xhci->devs[slot_id]->eps[ep_index];
|
|
|
ep_ring = xdev->eps[ep_index].ring;
|
|
|
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
|
|
|
|
|
|
@@ -3773,14 +3908,10 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
if (ret)
|
|
|
return ret;
|
|
|
|
|
|
- start_frame = readl(&xhci->run_regs->microframe_index);
|
|
|
- start_frame &= 0x3fff;
|
|
|
-
|
|
|
- urb->start_frame = start_frame;
|
|
|
- if (urb->dev->speed == USB_SPEED_LOW ||
|
|
|
- urb->dev->speed == USB_SPEED_FULL)
|
|
|
- urb->start_frame >>= 3;
|
|
|
-
|
|
|
+ /*
|
|
|
+ * Check interval value. This should be done before we start to
|
|
|
+ * calculate the start frame value.
|
|
|
+ */
|
|
|
xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
|
|
|
ep_interval = urb->interval;
|
|
|
/* Convert to microframes */
|
|
|
@@ -3801,6 +3932,40 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
urb->dev->speed == USB_SPEED_FULL)
|
|
|
urb->interval /= 8;
|
|
|
}
|
|
|
+
|
|
|
+ /* Calculate the start frame and put it in urb->start_frame. */
|
|
|
+ if (HCC_CFC(xhci->hcc_params) &&
|
|
|
+ ep_ring_is_processing(xhci, slot_id, ep_index)) {
|
|
|
+ urb->start_frame = xep->next_frame_id;
|
|
|
+ goto skip_start_over;
|
|
|
+ }
|
|
|
+
|
|
|
+ start_frame = readl(&xhci->run_regs->microframe_index);
|
|
|
+ start_frame &= 0x3fff;
|
|
|
+ /*
|
|
|
+ * Round up to the next frame and consider the time before the TRB really
|
|
|
+ * gets scheduled by hardware.
|
|
|
+ */
|
|
|
+ ist = HCS_IST(xhci->hcs_params2) & 0x7;
|
|
|
+ if (HCS_IST(xhci->hcs_params2) & (1 << 3))
|
|
|
+ ist <<= 3;
|
|
|
+ start_frame += ist + XHCI_CFC_DELAY;
|
|
|
+ start_frame = roundup(start_frame, 8);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
|
|
|
+ * is greater than 8 microframes.
|
|
|
+ */
|
|
|
+ if (urb->dev->speed == USB_SPEED_LOW ||
|
|
|
+ urb->dev->speed == USB_SPEED_FULL) {
|
|
|
+ start_frame = roundup(start_frame, urb->interval << 3);
|
|
|
+ urb->start_frame = start_frame >> 3;
|
|
|
+ } else {
|
|
|
+ start_frame = roundup(start_frame, urb->interval);
|
|
|
+ urb->start_frame = start_frame;
|
|
|
+ }
|
|
|
+
|
|
|
+skip_start_over:
|
|
|
ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
|
|
|
|
|
|
return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
|