@@ -606,7 +606,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
  * by this look, but we want to avoid concurrent calls for performance
  * reasons and to make the pcpu_get_vm_areas more deterministic.
  */
-static DEFINE_SPINLOCK(vmap_purge_lock);
+static DEFINE_MUTEX(vmap_purge_lock);
 
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
@@ -660,9 +660,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
  */
 static void try_purge_vmap_area_lazy(void)
 {
-	if (spin_trylock(&vmap_purge_lock)) {
+	if (mutex_trylock(&vmap_purge_lock)) {
 		__purge_vmap_area_lazy(ULONG_MAX, 0);
-		spin_unlock(&vmap_purge_lock);
+		mutex_unlock(&vmap_purge_lock);
 	}
 }
 
@@ -671,10 +671,10 @@ static void try_purge_vmap_area_lazy(void)
  */
 static void purge_vmap_area_lazy(void)
 {
-	spin_lock(&vmap_purge_lock);
+	mutex_lock(&vmap_purge_lock);
 	purge_fragmented_blocks_allcpus();
 	__purge_vmap_area_lazy(ULONG_MAX, 0);
-	spin_unlock(&vmap_purge_lock);
+	mutex_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -1063,11 +1063,11 @@ void vm_unmap_aliases(void)
 		rcu_read_unlock();
 	}
 
-	spin_lock(&vmap_purge_lock);
+	mutex_lock(&vmap_purge_lock);
 	purge_fragmented_blocks_allcpus();
 	if (!__purge_vmap_area_lazy(start, end) && flush)
 		flush_tlb_kernel_range(start, end);
-	spin_unlock(&vmap_purge_lock);
+	mutex_unlock(&vmap_purge_lock);
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 