@@ -55,6 +55,7 @@
 #include <linux/hugetlb.h>
 #include <linux/memory_hotplug.h>
 #include <linux/mm_inline.h>
+#include <linux/memremap.h>
 #include <linux/kfifo.h>
 #include <linux/ratelimit.h>
 #include <linux/page-isolation.h>
@@ -174,23 +175,52 @@ int hwpoison_filter(struct page *p)
 
 EXPORT_SYMBOL_GPL(hwpoison_filter);
 
+/*
+ * Kill all processes that have a poisoned page mapped and then isolate
+ * the page.
+ *
+ * General strategy:
+ * Find all processes having the page mapped and kill them.
+ * But we keep a page reference around so that the page is not
+ * actually freed yet.
+ * Then stash the page away
+ *
+ * There's no convenient way to get back to mapped processes
+ * from the VMAs. So do a brute-force search over all
+ * running processes.
+ *
+ * Remember that machine checks are not common (or rather
+ * if they are common you have other problems), so this shouldn't
+ * be a performance issue.
+ *
+ * Also there are some races possible while we get from the
+ * error detection to actually handle it.
+ */
+
+struct to_kill {
+        struct list_head nd;
+        struct task_struct *tsk;
+        unsigned long addr;
+        short size_shift;
+        char addr_valid;
+};
+
 /*
  * Send all the processes who have the page mapped a signal.
  * ``action optional'' if they are not immediately affected by the error
  * ``action required'' if error happened in current execution context
  */
-static int kill_proc(struct task_struct *t, unsigned long addr,
-                     unsigned long pfn, struct page *page, int flags)
+static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
 {
-        short addr_lsb;
+        struct task_struct *t = tk->tsk;
+        short addr_lsb = tk->size_shift;
         int ret;
 
         pr_err("Memory failure: %#lx: Killing %s:%d due to hardware memory corruption\n",
                 pfn, t->comm, t->pid);
-        addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
 
         if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
-                ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr,
+                ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)tk->addr,
                                        addr_lsb, current);
         } else {
                 /*
@@ -199,7 +229,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr,
                  * This could cause a loop when the user sets SIGBUS
                  * to SIG_IGN, but hopefully no one will do that?
                  */
-                ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)addr,
+                ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
                                       addr_lsb, t); /* synchronous? */
         }
         if (ret < 0)
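
For illustration only (this block is not part of the patch): a minimal userspace sketch of how the addr_lsb value that kill_proc() now takes from tk->size_shift is consumed. A SIGBUS handler can read the failing address from si_addr and the mapping granularity from si_addr_lsb; the field and constant names assume a reasonably recent glibc, and fprintf() is used purely for brevity even though it is not async-signal-safe.

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative SIGBUS handler: si_addr is the poisoned address, si_addr_lsb
 * is the log2 of the mapping size that kill_proc() fills from tk->size_shift. */
static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
{
        (void)sig;
        (void)ctx;
        if (si->si_code == BUS_MCEERR_AR || si->si_code == BUS_MCEERR_AO)
                fprintf(stderr, "memory failure at %p, %lu byte mapping\n",
                        si->si_addr, 1UL << si->si_addr_lsb);
        _exit(1);
}

int main(void)
{
        struct sigaction sa = {
                .sa_sigaction = sigbus_handler,
                .sa_flags = SA_SIGINFO,
        };

        sigaction(SIGBUS, &sa, NULL);
        pause();        /* wait for a (test-injected) memory failure */
        return 0;
}
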
@@ -235,34 +265,39 @@ void shake_page(struct page *p, int access)
 }
 EXPORT_SYMBOL_GPL(shake_page);
 
-/*
- * Kill all processes that have a poisoned page mapped and then isolate
- * the page.
- *
- * General strategy:
- * Find all processes having the page mapped and kill them.
- * But we keep a page reference around so that the page is not
- * actually freed yet.
- * Then stash the page away
- *
- * There's no convenient way to get back to mapped processes
- * from the VMAs. So do a brute-force search over all
- * running processes.
- *
- * Remember that machine checks are not common (or rather
- * if they are common you have other problems), so this shouldn't
- * be a performance issue.
- *
- * Also there are some races possible while we get from the
- * error detection to actually handle it.
- */
-
-struct to_kill {
-        struct list_head nd;
-        struct task_struct *tsk;
-        unsigned long addr;
-        char addr_valid;
-};
+static unsigned long dev_pagemap_mapping_shift(struct page *page,
+                struct vm_area_struct *vma)
+{
+        unsigned long address = vma_address(page, vma);
+        pgd_t *pgd;
+        p4d_t *p4d;
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte;
+
+        pgd = pgd_offset(vma->vm_mm, address);
+        if (!pgd_present(*pgd))
+                return 0;
+        p4d = p4d_offset(pgd, address);
+        if (!p4d_present(*p4d))
+                return 0;
+        pud = pud_offset(p4d, address);
+        if (!pud_present(*pud))
+                return 0;
+        if (pud_devmap(*pud))
+                return PUD_SHIFT;
+        pmd = pmd_offset(pud, address);
+        if (!pmd_present(*pmd))
+                return 0;
+        if (pmd_devmap(*pmd))
+                return PMD_SHIFT;
+        pte = pte_offset_map(pmd, address);
+        if (!pte_present(*pte))
+                return 0;
+        if (pte_devmap(*pte))
+                return PAGE_SHIFT;
+        return 0;
+}
 
 /*
  * Failure handling: if we can't find or can't kill a process there's
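
As a side note (not part of the patch), the shift returned above is both the si_addr_lsb reported to userspace and, later in this series, the log2 of the region that memory_failure_dev_pagemap() unmaps. A tiny sketch assuming x86-64 shift values (PAGE_SHIFT 12, PMD_SHIFT 21, PUD_SHIFT 30):

#include <stdio.h>

/* Hypothetical values assuming x86-64 paging: these are the three shifts
 * dev_pagemap_mapping_shift() can return for a live devmap mapping. */
int main(void)
{
        const unsigned long shifts[] = { 12, 21, 30 };
        const char *level[] = { "pte", "pmd", "pud" };

        for (int i = 0; i < 3; i++)
                printf("%s devmap mapping -> %lu KiB blast radius\n",
                       level[i], (1UL << shifts[i]) >> 10);
        return 0;
}
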
@@ -293,6 +328,10 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
         }
         tk->addr = page_address_in_vma(p, vma);
         tk->addr_valid = 1;
+        if (is_zone_device_page(p))
+                tk->size_shift = dev_pagemap_mapping_shift(p, vma);
+        else
+                tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
 
         /*
          * In theory we don't have to kill when the page was
@@ -300,7 +339,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
          * likely very rare kill anyways just out of paranoia, but use
          * a SIGKILL because the error is not contained anymore.
          */
-        if (tk->addr == -EFAULT) {
+        if (tk->addr == -EFAULT || tk->size_shift == 0) {
                 pr_info("Memory failure: Unable to find user space address %lx in %s\n",
                         page_to_pfn(p), tsk->comm);
                 tk->addr_valid = 0;
@@ -318,9 +357,8 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * Also when FAIL is set do a force kill because something went
  * wrong earlier.
  */
-static void kill_procs(struct list_head *to_kill, int forcekill,
-                       bool fail, struct page *page, unsigned long pfn,
-                       int flags)
+static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
+                       unsigned long pfn, int flags)
 {
         struct to_kill *tk, *next;
 
@@ -343,8 +381,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill,
                          * check for that, but we need to tell the
                          * process anyways.
                          */
-                        else if (kill_proc(tk->tsk, tk->addr,
-                                           pfn, page, flags) < 0)
+                        else if (kill_proc(tk, pfn, flags) < 0)
                                 pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
                                        pfn, tk->tsk->comm, tk->tsk->pid);
                 }
@@ -516,6 +553,7 @@ static const char * const action_page_types[] = {
         [MF_MSG_TRUNCATED_LRU]          = "already truncated LRU page",
         [MF_MSG_BUDDY]                  = "free buddy page",
         [MF_MSG_BUDDY_2ND]              = "free buddy page (2nd try)",
+        [MF_MSG_DAX]                    = "dax page",
         [MF_MSG_UNKNOWN]                = "unknown page",
 };
 
@@ -1013,7 +1051,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
          * any accesses to the poisoned memory.
          */
         forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
-        kill_procs(&tokill, forcekill, !unmap_success, p, pfn, flags);
+        kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
 
         return unmap_success;
 }
@@ -1113,6 +1151,83 @@ out:
         return res;
 }
 
+static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
+                struct dev_pagemap *pgmap)
+{
+        struct page *page = pfn_to_page(pfn);
+        const bool unmap_success = true;
+        unsigned long size = 0;
+        struct to_kill *tk;
+        LIST_HEAD(tokill);
+        int rc = -EBUSY;
+        loff_t start;
+
+        /*
+         * Prevent the inode from being freed while we are interrogating
+         * the address_space, typically this would be handled by
+         * lock_page(), but dax pages do not use the page lock. This
+         * also prevents changes to the mapping of this pfn until
+         * poison signaling is complete.
+         */
+        if (!dax_lock_mapping_entry(page))
+                goto out;
+
+        if (hwpoison_filter(page)) {
+                rc = 0;
+                goto unlock;
+        }
+
+        switch (pgmap->type) {
+        case MEMORY_DEVICE_PRIVATE:
+        case MEMORY_DEVICE_PUBLIC:
+                /*
+                 * TODO: Handle HMM pages which may need coordination
+                 * with device-side memory.
+                 */
+                goto unlock;
+        default:
+                break;
+        }
+
+        /*
+         * Use this flag as an indication that the dax page has been
+         * remapped UC to prevent speculative consumption of poison.
+         */
+        SetPageHWPoison(page);
+
+        /*
+         * Unlike System-RAM there is no possibility to swap in a
+         * different physical page at a given virtual address, so all
+         * userspace consumption of ZONE_DEVICE memory necessitates
+         * SIGBUS (i.e. MF_MUST_KILL)
+         */
+        flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
+        collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
+
+        list_for_each_entry(tk, &tokill, nd)
+                if (tk->size_shift)
+                        size = max(size, 1UL << tk->size_shift);
+        if (size) {
+                /*
+                 * Unmap the largest mapping to avoid breaking up
+                 * device-dax mappings which are constant size. The
+                 * actual size of the mapping being torn down is
+                 * communicated in siginfo, see kill_proc()
+                 */
+                start = (page->index << PAGE_SHIFT) & ~(size - 1);
+                unmap_mapping_range(page->mapping, start, start + size, 0);
+        }
+        kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
+        rc = 0;
+unlock:
+        dax_unlock_mapping_entry(page);
+out:
+        /* drop pgmap ref acquired in caller */
+        put_dev_pagemap(pgmap);
+        action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
+        return rc;
+}
+
 /**
  * memory_failure - Handle memory failure of a page.
  * @pfn: Page Number of the corrupted page
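
To make the start/size rounding in memory_failure_dev_pagemap() concrete, here is a small worked example with hypothetical numbers (not part of the patch): a poisoned 4KiB page at page->index 0x203 backed by a 2MiB device-dax mapping, i.e. the largest tk->size_shift seen is 21.

#include <stdio.h>

int main(void)
{
        unsigned long page_shift = 12;          /* PAGE_SHIFT */
        unsigned long index = 0x203;            /* hypothetical page->index */
        unsigned long size = 1UL << 21;         /* largest tk->size_shift seen */
        unsigned long start = (index << page_shift) & ~(size - 1);

        /* prints: unmap [0x200000, 0x400000), the whole 2MiB mapping that
         * contains the poisoned 4KiB page, rather than splitting it */
        printf("unmap [%#lx, %#lx)\n", start, start + size);
        return 0;
}
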
@@ -1135,6 +1250,7 @@ int memory_failure(unsigned long pfn, int flags)
         struct page *p;
         struct page *hpage;
         struct page *orig_head;
+        struct dev_pagemap *pgmap;
         int res;
         unsigned long page_flags;
 
@@ -1147,6 +1263,10 @@ int memory_failure(unsigned long pfn, int flags)
                 return -ENXIO;
         }
 
+        pgmap = get_dev_pagemap(pfn, NULL);
+        if (pgmap)
+                return memory_failure_dev_pagemap(pfn, flags, pgmap);
+
         p = pfn_to_page(pfn);
         if (PageHuge(p))
                 return memory_failure_hugetlb(pfn, flags);
@@ -1777,6 +1897,14 @@ int soft_offline_page(struct page *page, int flags)
         int ret;
         unsigned long pfn = page_to_pfn(page);
 
+        if (is_zone_device_page(page)) {
+                pr_debug_ratelimited("soft_offline: %#lx page is device page\n",
+                                pfn);
+                if (flags & MF_COUNT_INCREASED)
+                        put_page(page);
+                return -EIO;
+        }
+
         if (PageHWPoison(page)) {
                 pr_info("soft offline: %#lx page already poisoned\n", pfn);
                 if (flags & MF_COUNT_INCREASED)
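
Finally, a hedged sketch (not part of the patch) of how soft offline is typically requested from userspace, via madvise(MADV_SOFT_OFFLINE). It assumes the libc headers expose MADV_SOFT_OFFLINE, the kernel was built with CONFIG_MEMORY_FAILURE, and the caller has CAP_SYS_ADMIN; with the hunk above, aiming this at a mapping of device/dax pages now fails with EIO rather than attempting to migrate them.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *buf = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;
        buf[0] = 1;     /* fault the page in so there is a page to offline */

        /* point buf at a dax file mapping instead to observe the new
         * -EIO result from soft_offline_page() */
        if (madvise(buf, pagesz, MADV_SOFT_OFFLINE))
                fprintf(stderr, "soft offline failed: %s\n", strerror(errno));
        return 0;
}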