@@ -65,9 +65,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
+bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+	return !!iovad->fq;
+}
+
 static void free_iova_flush_queue(struct iova_domain *iovad)
 {
-	if (!iovad->fq)
+	if (!has_iova_flush_queue(iovad))
 		return;
 
 	if (timer_pending(&iovad->fq_timer))
@@ -85,13 +90,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 int init_iova_flush_queue(struct iova_domain *iovad,
 			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
 {
+	struct iova_fq __percpu *queue;
 	int cpu;
 
 	atomic64_set(&iovad->fq_flush_start_cnt,  0);
 	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
 
-	iovad->fq = alloc_percpu(struct iova_fq);
-	if (!iovad->fq)
+	queue = alloc_percpu(struct iova_fq);
+	if (!queue)
 		return -ENOMEM;
 
 	iovad->flush_cb   = flush_cb;
@@ -100,13 +106,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq;
 
-		fq = per_cpu_ptr(iovad->fq, cpu);
+		fq = per_cpu_ptr(queue, cpu);
 		fq->head = 0;
 		fq->tail = 0;
 
 		spin_lock_init(&fq->lock);
 	}
 
+	smp_wmb();
+
+	iovad->fq = queue;
+
 	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
 	atomic_set(&iovad->fq_timer_on, 0);
 
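
The ordering in the last hunk is the point of the change: init_iova_flush_queue() now fills in every per-CPU queue first, issues smp_wmb(), and only then publishes the pointer through iovad->fq, so a concurrent reader that observes a non-NULL fq (via the new has_iova_flush_queue() helper) also observes fully initialized queues. Below is a minimal, self-contained userspace C11 sketch of that publication pattern; the names (pcpu_fq, fq_publish, fq_reader) are illustrative and not from the patch, and release/acquire atomics stand in for the kernel's smp_wmb() plus the dependent read through the pointer.

/*
 * Sketch of the pointer-publication pattern used above, in portable C11.
 * A fixed array stands in for alloc_percpu(); an atomic pointer stands in
 * for iovad->fq.
 */
#include <stdatomic.h>
#include <stdio.h>

struct pcpu_fq {
	unsigned int head, tail;
};

static struct pcpu_fq queues[4];			/* stand-in for alloc_percpu() */
static _Atomic(struct pcpu_fq *) fq;			/* stand-in for iovad->fq */

static void fq_publish(void)
{
	int cpu;

	/* Initialize every queue before the pointer becomes visible. */
	for (cpu = 0; cpu < 4; cpu++) {
		queues[cpu].head = 0;
		queues[cpu].tail = 0;
	}

	/*
	 * Release store: everything written above is ordered before the
	 * pointer, mirroring smp_wmb() followed by "iovad->fq = queue".
	 */
	atomic_store_explicit(&fq, queues, memory_order_release);
}

static int fq_reader(void)
{
	/* Acquire load pairs with the release store in fq_publish(). */
	struct pcpu_fq *q = atomic_load_explicit(&fq, memory_order_acquire);

	if (!q)			/* the has_iova_flush_queue() check */
		return 0;

	/* Safe: q's contents are guaranteed initialized at this point. */
	return q[0].head == q[0].tail;
}

int main(void)
{
	printf("before publish: %d\n", fq_reader());	/* 0: no queue yet */
	fq_publish();
	printf("after publish:  %d\n", fq_reader());	/* 1: empty queue */
	return 0;
}

In the kernel code itself the read side can rely on the address dependency through the fq pointer rather than an explicit read barrier, and has_iova_flush_queue() centralizes the NULL check so that free_iova_flush_queue(), and any caller racing with initialization, never touches a half-initialized queue.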