@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	if (p4d_none(*p4d))
 		return NULL;
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+
+	/*
+	 * Don't dereference bad PUD or PMD (below) entries. This will also
+	 * identify huge mappings, which we may encounter on architectures
+	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
+	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
+	 * not [unambiguously] associated with a struct page, so there is
+	 * no correct value to return for them.
+	 */
+	WARN_ON_ONCE(pud_bad(*pud));
+	if (pud_none(*pud) || pud_bad(*pud))
 		return NULL;
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	WARN_ON_ONCE(pmd_bad(*pmd));
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		return NULL;
 
 	ptep = pte_offset_map(pmd, addr);