@@ -666,6 +666,59 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	return 0;
 }
 
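+/*
+ * populate_pud() is only stubbed out here; until a real implementation
+ * replaces this macro, populate_pgd() below simply fails with -1.
+ */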
+#define populate_pud(cpa, addr, pgd, pgprot) (-1)
+
+/*
+ * Restrictions for the kernel page table do not necessarily apply when
+ * mapping in an alternate PGD.
+ */
+static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
+{
+	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
+	bool allocd_pgd = false;
+	pgd_t *pgd_entry;
+	pud_t *pud = NULL;	/* shut up gcc */
+	int ret;
+
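+	/* Index into the alternate PGD supplied by the caller in cpa->pgd. */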
+	pgd_entry = cpa->pgd + pgd_index(addr);
+
+	/*
+	 * Allocate a PUD page and hand it down for mapping.
+	 */
+	if (pgd_none(*pgd_entry)) {
+		pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+		if (!pud)
+			return -1;
+
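+		/* Install the new PUD page and note that we allocated it. */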
+		set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
+		allocd_pgd = true;
+	}
+
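+	/* Apply the caller's clear/set masks on top of _KERNPG_TABLE. */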
+	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
+	pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
+
+	ret = populate_pud(cpa, addr, pgd_entry, pgprot);
+	if (ret < 0) {
+		/* Unwind the PUD page we allocated above so it isn't leaked. */
+		if (allocd_pgd) {
+			pgd_clear(pgd_entry);
+			free_page((unsigned long)pud);
+		}
+		return ret;
+	}
+
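+	/* A non-negative return value is the number of pages mapped. */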
+	cpa->numpages = ret;
+	return 0;
+}
+
 static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
 			       int primary)
 {