@@ -1486,7 +1486,39 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	kfree(area);
 	return;
 }
-
+
+static inline void __vfree_deferred(const void *addr)
+{
+	/*
+	 * Use raw_cpu_ptr() because this can be called from preemptible
+	 * context. Preemption is absolutely fine here, because the llist_add()
+	 * implementation is lockless, so it works even if we are adding to
+	 * another cpu's list. schedule_work() should be fine with this too.
+	 */
+	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+	if (llist_add((struct llist_node *)addr, &p->list))
+		schedule_work(&p->wq);
+}
+
+/**
+ * vfree_atomic - release memory allocated by vmalloc()
+ * @addr:	  memory base address
+ *
+ * This one is just like vfree() but can be called in any atomic context
+ * except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+	BUG_ON(in_nmi());
+
+	kmemleak_free(addr);
+
+	if (!addr)
+		return;
+	__vfree_deferred(addr);
+}
+
 /**
  * vfree - release memory allocated by vmalloc()
  * @addr:	  memory base address
@@ -1509,11 +1541,9 @@ void vfree(const void *addr)
 
 	if (!addr)
 		return;
-	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
-		if (llist_add((struct llist_node *)addr, &p->list))
-			schedule_work(&p->wq);
-	} else
+	if (unlikely(in_interrupt()))
+		__vfree_deferred(addr);
+	else
 		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
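
A note on the batching in __vfree_deferred(): llist_add() returns true only
when the list was empty before the insert, so the work item is scheduled once
per batch of deferred frees rather than on every call; the worker then drains
the whole llist.

For illustration, a minimal sketch of the kind of caller this enables:
freeing a vmalloc() allocation from a context that cannot sleep. The names
here (my_ctx, my_ctx_drop_buf, the lock/buf fields) are hypothetical and not
part of the patch.

	/* Hypothetical caller -- my_ctx and its fields are illustrative only. */
	#include <linux/vmalloc.h>
	#include <linux/spinlock.h>

	struct my_ctx {
		spinlock_t lock;
		void *buf;	/* allocated earlier with vmalloc() */
	};

	static void my_ctx_drop_buf(struct my_ctx *ctx)
	{
		spin_lock(&ctx->lock);	/* atomic context: plain vfree() may sleep here */
		vfree_atomic(ctx->buf);	/* defers the actual unmap to a workqueue */
		ctx->buf = NULL;
		spin_unlock(&ctx->lock);
	}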