|
@@ -792,6 +792,40 @@ DEFINE_ASAN_SET_SHADOW(f5);
|
|
|
DEFINE_ASAN_SET_SHADOW(f8);
|
|
|
|
|
|
#ifdef CONFIG_MEMORY_HOTPLUG
|
|
|
+static bool shadow_mapped(unsigned long addr)
|
|
|
+{
|
|
|
+ pgd_t *pgd = pgd_offset_k(addr);
|
|
|
+ p4d_t *p4d;
|
|
|
+ pud_t *pud;
|
|
|
+ pmd_t *pmd;
|
|
|
+ pte_t *pte;
|
|
|
+
|
|
|
+ if (pgd_none(*pgd))
|
|
|
+ return false;
|
|
|
+ p4d = p4d_offset(pgd, addr);
|
|
|
+ if (p4d_none(*p4d))
|
|
|
+ return false;
|
|
|
+ pud = pud_offset(p4d, addr);
|
|
|
+ if (pud_none(*pud))
|
|
|
+ return false;
|
|
|
+
|
|
|
+ /*
|
|
|
+	 * We can't use pud_large() or pud_huge(): the former is
|
|
|
+	 * arch-specific, the latter depends on HUGETLB_PAGE. So let's abuse
|
|
|
+	 * pud_bad(); if pud is bad then it's bad because it's huge.
|
|
|
+ */
|
|
|
+ if (pud_bad(*pud))
|
|
|
+ return true;
|
|
|
+ pmd = pmd_offset(pud, addr);
|
|
|
+ if (pmd_none(*pmd))
|
|
|
+ return false;
|
|
|
+
|
|
|
+ if (pmd_bad(*pmd))
|
|
|
+ return true;
|
|
|
+ pte = pte_offset_kernel(pmd, addr);
|
|
|
+ return !pte_none(*pte);
|
|
|
+}
|
|
|
+
|
|
|
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
|
|
|
unsigned long action, void *data)
|
|
|
{
|
|
@@ -813,6 +847,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
|
|
|
case MEM_GOING_ONLINE: {
|
|
|
void *ret;
|
|
|
|
|
|
+ /*
|
|
|
+	 * If shadow is mapped already then it must have been mapped
|
|
|
+	 * during the boot. This could happen if we're onlining previously
|
|
|
+ * offlined memory.
|
|
|
+ */
|
|
|
+ if (shadow_mapped(shadow_start))
|
|
|
+ return NOTIFY_OK;
|
|
|
+
|
|
|
ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
|
|
|
shadow_end, GFP_KERNEL,
|
|
|
PAGE_KERNEL, VM_NO_GUARD,
|
|
@@ -824,8 +866,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
|
|
|
kmemleak_ignore(ret);
|
|
|
return NOTIFY_OK;
|
|
|
}
|
|
|
- case MEM_OFFLINE:
|
|
|
- vfree((void *)shadow_start);
|
|
|
+ case MEM_CANCEL_ONLINE:
|
|
|
+ case MEM_OFFLINE: {
|
|
|
+ struct vm_struct *vm;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * shadow_start was either mapped during boot by kasan_init()
|
|
|
+ * or during memory online by __vmalloc_node_range().
|
|
|
+ * In the latter case we can use vfree() to free shadow.
|
|
|
+	 * A non-NULL result of find_vm_area() will tell us if
|
|
|
+ * that was the second case.
|
|
|
+ *
|
|
|
+ * Currently it's not possible to free shadow mapped
|
|
|
+ * during boot by kasan_init(). It's because the code
|
|
|
+ * to do that hasn't been written yet. So we'll just
|
|
|
+ * leak the memory.
|
|
|
+ */
|
|
|
+ vm = find_vm_area((void *)shadow_start);
|
|
|
+ if (vm)
|
|
|
+ vfree((void *)shadow_start);
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
return NOTIFY_OK;
|
|
@@ -838,5 +898,5 @@ static int __init kasan_memhotplug_init(void)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-module_init(kasan_memhotplug_init);
|
|
|
+core_initcall(kasan_memhotplug_init);
|
|
|
#endif
|