@@ -105,6 +105,10 @@ static struct irq_work mce_irq_work;
 
 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn);
+#endif
+
 /*
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
@@ -590,7 +594,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
 
 	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
 		pfn = mce->addr >> PAGE_SHIFT;
-		memory_failure(pfn, 0);
+		if (!memory_failure(pfn, 0))
+			mce_unmap_kpfn(pfn);
 	}
 
 	return NOTIFY_OK;
@@ -1057,12 +1062,13 @@ static int do_memory_failure(struct mce *m)
 	ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
 	if (ret)
 		pr_err("Memory error not recovered");
+	else
+		mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
 	return ret;
 }
 
-#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
-
-void arch_unmap_kpfn(unsigned long pfn)
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn)
 {
 	unsigned long decoy_addr;
 
@@ -1073,7 +1079,7 @@ void arch_unmap_kpfn(unsigned long pfn)
 	 * We would like to just call:
 	 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
 	 * but doing that would radically increase the odds of a
-	 * speculative access to the posion page because we'd have
+	 * speculative access to the poison page because we'd have
 	 * the virtual address of the kernel 1:1 mapping sitting
 	 * around in registers.
 	 * Instead we get tricky. We create a non-canonical address
@@ -1098,7 +1104,6 @@ void arch_unmap_kpfn(unsigned long pfn)
 
 	if (set_memory_np(decoy_addr, 1))
 		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
-
 }
 #endif
 
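For reference, below is a minimal sketch (not part of the patch) of the decoy-address trick described in the comment above: rather than passing the real 1:1-mapping address to set_memory_np(), the pfn is turned into an address with bit 63 flipped, so the canonical virtual address of the poison page never sits in a register where it could trigger a speculative access. The helper name is illustrative only; the real logic lives in mce_unmap_kpfn() in mce.c, and the exact decoy formula here is an approximation of that code.

/*
 * Illustrative sketch only: assumes x86_64 and the kernel's BIT(),
 * set_memory_np() and pr_warn() helpers.  Not part of this patch.
 */
static void unmap_poison_pfn_sketch(unsigned long pfn)
{
	unsigned long decoy_addr;

	/*
	 * The "real" 1:1-map address would be pfn_to_kaddr(pfn), i.e.
	 * PAGE_OFFSET + (pfn << PAGE_SHIFT).  Flip bit 63 to make the
	 * value non-canonical; this relies on set_memory_np() not
	 * checking that the address it was handed is canonical.
	 */
	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	if (set_memory_np(decoy_addr, 1))
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
}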