@@ -531,22 +531,21 @@ static void unmap_vmap_area(struct vmap_area *va)
 static void vmap_debug_free_range(unsigned long start, unsigned long end)
 {
 	/*
-	 * Unmap page tables and force a TLB flush immediately if
-	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
-	 * bugs similarly to those in linear kernel virtual address
-	 * space after a page has been freed.
+	 * Unmap page tables and force a TLB flush immediately if pagealloc
+	 * debugging is enabled. This catches use after free bugs similarly to
+	 * those in linear kernel virtual address space after a page has been
+	 * freed.
 	 *
-	 * All the lazy freeing logic is still retained, in order to
-	 * minimise intrusiveness of this debugging feature.
+	 * All the lazy freeing logic is still retained, in order to minimise
+	 * intrusiveness of this debugging feature.
 	 *
-	 * This is going to be *slow* (linear kernel virtual address
-	 * debugging doesn't do a broadcast TLB flush so it is a lot
-	 * faster).
+	 * This is going to be *slow* (linear kernel virtual address debugging
+	 * doesn't do a broadcast TLB flush so it is a lot faster).
 	 */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	vunmap_page_range(start, end);
-	flush_tlb_kernel_range(start, end);
-#endif
+	if (debug_pagealloc_enabled()) {
+		vunmap_page_range(start, end);
+		flush_tlb_kernel_range(start, end);
+	}
 }
 
 /*
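
Note for readers without the tree at hand: the #ifdef can be dropped here
without changing the generated code, because debug_pagealloc_enabled()
evaluates to a constant false when CONFIG_DEBUG_PAGEALLOC is not set, so
the compiler eliminates the whole branch. A simplified sketch of the
helper's usual shape (paraphrased, not part of this diff; details in
include/linux/mm.h may differ by kernel version):

	#ifdef CONFIG_DEBUG_PAGEALLOC
	extern bool _debug_pagealloc_enabled;

	static inline bool debug_pagealloc_enabled(void)
	{
		/* runtime state, e.g. set from the debug_pagealloc= boot option */
		return _debug_pagealloc_enabled;
	}
	#else
	static inline bool debug_pagealloc_enabled(void)
	{
		/* constant false: callers' if () blocks are compiled away */
		return false;
	}
	#endif

The practical gain over the removed #ifdef is that the unmap-and-flush
path also becomes a runtime decision on kernels built with
CONFIG_DEBUG_PAGEALLOC but booted with the feature switched off.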