@@ -1859,21 +1859,6 @@ static inline bool queue_ring_full(struct flush_queue *queue)
 #define queue_ring_for_each(i, q) \
 	for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
 
-static void queue_release(struct dma_ops_domain *dom,
-			  struct flush_queue *queue)
-{
-	unsigned i;
-
-	assert_spin_locked(&queue->lock);
-
-	queue_ring_for_each(i, queue)
-		free_iova_fast(&dom->iovad,
-			       queue->entries[i].iova_pfn,
-			       queue->entries[i].pages);
-
-	queue->head = queue->tail = 0;
-}
-
 static inline unsigned queue_ring_add(struct flush_queue *queue)
 {
 	unsigned idx = queue->tail;
@@ -1925,12 +1910,15 @@ static void queue_add(struct dma_ops_domain *dom,
 	queue = get_cpu_ptr(dom->flush_queue);
 	spin_lock_irqsave(&queue->lock, flags);
 
-	queue_ring_free_flushed(dom, queue);
-
-	if (queue_ring_full(queue)) {
+	/*
+	 * When ring-queue is full, flush the entries from the IOTLB so
+	 * that we can free all entries with queue_ring_free_flushed()
+	 * below.
+	 */
+	if (queue_ring_full(queue))
 		dma_ops_domain_flush_tlb(dom);
-		queue_release(dom, queue);
-	}
+
+	queue_ring_free_flushed(dom, queue);
 
 	idx = queue_ring_add(queue);
 
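For reference, a minimal user-space sketch of the ring-queue pattern that queue_add() implements after this change: entries are stamped with the current flush counter when queued, a full ring triggers an IOTLB flush, and the free-flushed step then reaps every entry queued before that flush. All demo_* names are illustrative stand-ins, not kernel API; the per-CPU queues, queue->lock, free_iova_fast(), and the driver's per-domain flush counters are omitted or simplified here.

#include <stdbool.h>
#include <stdio.h>

#define FLUSH_QUEUE_SIZE 8

struct demo_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	unsigned long counter;		/* flush_counter value at queue time */
};

struct demo_queue {
	unsigned head, tail;		/* reap at head, add at tail */
	unsigned long flush_counter;	/* bumped by every IOTLB flush */
	struct demo_entry entries[FLUSH_QUEUE_SIZE];
};

/* One slot stays empty so head == tail means "empty", not "full". */
static bool demo_ring_full(struct demo_queue *q)
{
	return ((q->tail + 1) % FLUSH_QUEUE_SIZE) == q->head;
}

/* Stand-in for dma_ops_domain_flush_tlb(): makes queued entries reapable. */
static void demo_flush_tlb(struct demo_queue *q)
{
	q->flush_counter++;
	printf("IOTLB flush #%lu\n", q->flush_counter);
}

/* Free every entry that was queued before the last flush. */
static void demo_ring_free_flushed(struct demo_queue *q)
{
	while (q->head != q->tail &&
	       q->entries[q->head].counter < q->flush_counter) {
		printf("  free pfn %lu (%lu pages)\n",
		       q->entries[q->head].iova_pfn,
		       q->entries[q->head].pages);
		q->head = (q->head + 1) % FLUSH_QUEUE_SIZE;
	}
}

/* Mirrors the reordered queue_add(): flush only when full, then reap. */
static void demo_queue_add(struct demo_queue *q, unsigned long pfn,
			   unsigned long pages)
{
	unsigned idx;

	if (demo_ring_full(q))
		demo_flush_tlb(q);

	demo_ring_free_flushed(q);

	idx = q->tail;
	q->tail = (q->tail + 1) % FLUSH_QUEUE_SIZE;
	q->entries[idx] = (struct demo_entry){ pfn, pages, q->flush_counter };
}

int main(void)
{
	struct demo_queue q = { 0 };
	unsigned long pfn;

	for (pfn = 0; pfn < 20; pfn++)
		demo_queue_add(&q, pfn, 1);

	return 0;
}

The point of the reordering is visible in the sketch: the free-flushed step runs after the (conditional) flush instead of before it, so a full ring is guaranteed to drain on the same call, which is what makes queue_release() redundant.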