@@ -357,7 +357,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
  * Set the end flag and the cycle toggle bit on the last segment.
  * See section 4.9.1 and figures 15 and 16.
  */
-static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		unsigned int num_segs, unsigned int cycle_state,
 		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
@@ -454,7 +454,7 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	return 0;
 }
 
-static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
 		int type, gfp_t flags)
 {
 	struct xhci_container_ctx *ctx;
@@ -479,7 +479,7 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci
 	return ctx;
 }
 
-static void xhci_free_container_ctx(struct xhci_hcd *xhci,
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
 		struct xhci_container_ctx *ctx)
 {
 	if (!ctx)
@@ -1757,21 +1757,61 @@ void xhci_free_command(struct xhci_hcd *xhci,
 	kfree(command);
 }
 
+int xhci_alloc_erst(struct xhci_hcd *xhci,
+		    struct xhci_ring *evt_ring,
+		    struct xhci_erst *erst,
+		    gfp_t flags)
+{
+	size_t size;
+	unsigned int val;
+	struct xhci_segment *seg;
+	struct xhci_erst_entry *entry;
+
+	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
+	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
+					   size,
+					   &erst->erst_dma_addr,
+					   flags);
+	if (!erst->entries)
+		return -ENOMEM;
+
+	memset(erst->entries, 0, size);
+	erst->num_entries = evt_ring->num_segs;
+
+	seg = evt_ring->first_seg;
+	for (val = 0; val < evt_ring->num_segs; val++) {
+		entry = &erst->entries[val];
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	return 0;
+}
+
+void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+	size_t size;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
+	if (erst->entries)
+		dma_free_coherent(dev, size,
+				erst->entries,
+				erst->erst_dma_addr);
+	erst->entries = NULL;
+}
+
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
-	int size;
 	int i, j, num_ports;
 
 	cancel_delayed_work_sync(&xhci->cmd_timer);
 
-	/* Free the Event Ring Segment Table and the actual Event Ring */
-	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
-	if (xhci->erst.entries)
-		dma_free_coherent(dev, size,
-				xhci->erst.entries, xhci->erst.erst_dma_addr);
-	xhci->erst.entries = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
+	xhci_free_erst(xhci, &xhci->erst);
+
 	if (xhci->event_ring)
 		xhci_ring_free(xhci, xhci->event_ring);
 	xhci->event_ring = NULL;
@@ -2308,9 +2348,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	unsigned int val, val2;
 	u64 val_64;
-	struct xhci_segment *seg;
-	u32 page_size, temp;
-	int i;
+	u32 page_size, temp;
+	int i, ret;
 
 	INIT_LIST_HEAD(&xhci->cmd_list);
 
@@ -2449,32 +2488,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	if (xhci_check_trb_in_td_math(xhci) < 0)
 		goto fail;
 
-	xhci->erst.entries = dma_alloc_coherent(dev,
-			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
-			flags);
-	if (!xhci->erst.entries)
+	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
+	if (ret)
 		goto fail;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Allocated event ring segment table at 0x%llx",
-			(unsigned long long)dma);
-
-	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
-	xhci->erst.num_entries = ERST_NUM_SEGS;
-	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
-			xhci->erst.num_entries,
-			xhci->erst.entries,
-			(unsigned long long)xhci->erst.erst_dma_addr);
-
-	/* set ring base address and size for each segment table entry */
-	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
-		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = cpu_to_le64(seg->dma);
-		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
-		entry->rsvd = 0;
-		seg = seg->next;
-	}
 
 	/* set ERST count with the number of entries in the segment table */
 	val = readl(&xhci->ir_set->erst_size);
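
Usage note (not part of the patch): the two new helpers form an allocate/free pair. As the hunks above show, xhci_mem_init() builds the ERST from an already-allocated event ring and xhci_mem_cleanup() tears it down. The sketch below restates that calling pattern in isolation; the wrapper names example_setup_erst()/example_teardown_erst() are hypothetical, and the interrupter register programming (erst_size and friends) is elided.

/*
 * Illustrative sketch only, assuming the usual xhci-mem.c context:
 * xhci->event_ring has already been allocated with xhci_ring_alloc().
 */
static int example_setup_erst(struct xhci_hcd *xhci, gfp_t flags)
{
	int ret;

	/* Build one ERST entry per event ring segment. */
	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
	if (ret)
		return ret;	/* -ENOMEM if the DMA allocation failed */

	/* ... program the interrupter's ERST size/base registers here ... */
	return 0;
}

static void example_teardown_erst(struct xhci_hcd *xhci)
{
	/* Safe even if allocation never happened: erst->entries is checked. */
	xhci_free_erst(xhci, &xhci->erst);
}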