@@ -481,7 +481,7 @@ struct deferred_flush_data {
 	struct deferred_flush_table *tables;
 };
 
-DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
+static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
 
 /* bitmap for indexing intel_iommus */
 static int g_num_of_iommus;
@@ -3710,10 +3710,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
 	struct intel_iommu *iommu;
 	struct deferred_flush_entry *entry;
 	struct deferred_flush_data *flush_data;
-	unsigned int cpuid;
 
-	cpuid = get_cpu();
-	flush_data = per_cpu_ptr(&deferred_flush, cpuid);
+	flush_data = raw_cpu_ptr(&deferred_flush);
 
 	/* Flush all CPUs' entries to avoid deferring too much. If
 	 * this becomes a bottleneck, can just flush us, and rely on
@@ -3746,8 +3744,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
 	}
 	flush_data->size++;
 	spin_unlock_irqrestore(&flush_data->lock, flags);
-
-	put_cpu();
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
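
Not part of the patch itself: below is a minimal, self-contained sketch of the pattern the
hunks above switch to, namely reading the current CPU's per-CPU data with raw_cpu_ptr()
and relying on the structure's own spinlock instead of pinning the task with
get_cpu()/put_cpu(). All names here (example_queue, example_flush, example_enqueue) are
hypothetical and only illustrate the idea; they are not taken from intel-iommu.c.

#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Hypothetical per-CPU queue, analogous to deferred_flush_data:
 * every field is protected by ->lock, not by disabled preemption. */
struct example_queue {
	spinlock_t lock;
	unsigned int size;
};

static DEFINE_PER_CPU(struct example_queue, example_flush);

static void example_queue_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(example_flush, cpu).lock);
}

static void example_enqueue(void)
{
	struct example_queue *q;
	unsigned long flags;

	/* raw_cpu_ptr() picks the current CPU's instance without
	 * disabling preemption. If the task migrates before the lock
	 * is taken, it merely queues onto another CPU's instance,
	 * which the spinlock still makes safe. */
	q = raw_cpu_ptr(&example_flush);

	spin_lock_irqsave(&q->lock, flags);
	q->size++;
	spin_unlock_irqrestore(&q->lock, flags);
}

The point of the sketch: once the data is only ever touched under its spinlock, keeping
preemption disabled across the lookup buys nothing, so the get_cpu()/put_cpu() pair (and
the cpuid local) can be dropped, which is exactly what the hunks above do.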