|
@@ -80,19 +80,55 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
|
|
|
do {
|
|
|
/*
|
|
|
* Need to have the least restrictive permissions available
|
|
|
- * permissions will be fixed up later
|
|
|
+ * permissions will be fixed up later. Default the new page
|
|
|
+ * range as contiguous ptes.
|
|
|
*/
|
|
|
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
|
|
|
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
|
|
|
pfn++;
|
|
|
} while (pte++, i++, i < PTRS_PER_PTE);
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Given a PTE with the CONT bit set, determine where the CONT range
|
|
|
+ * starts, and clear the entire range of PTE CONT bits.
|
|
|
+ */
|
|
|
+static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
|
|
|
+{
|
|
|
+ int i;
|
|
|
+
|
|
|
+ pte -= CONT_RANGE_OFFSET(addr);
|
|
|
+ for (i = 0; i < CONT_PTES; i++) {
|
|
|
+ set_pte(pte, pte_mknoncont(*pte));
|
|
|
+ pte++;
|
|
|
+ }
|
|
|
+ flush_tlb_all();
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Given a range of PTEs set the pfn and provided page protection flags
|
|
|
+ */
|
|
|
+static void __populate_init_pte(pte_t *pte, unsigned long addr,
|
|
|
+ unsigned long end, phys_addr_t phys,
|
|
|
+ pgprot_t prot)
|
|
|
+{
|
|
|
+ unsigned long pfn = __phys_to_pfn(phys);
|
|
|
+
|
|
|
+ do {
|
|
|
+ /* clear all the bits except the pfn, then apply the prot */
|
|
|
+ set_pte(pte, pfn_pte(pfn, prot));
|
|
|
+ pte++;
|
|
|
+ pfn++;
|
|
|
+ addr += PAGE_SIZE;
|
|
|
+ } while (addr != end);
|
|
|
+}
|
|
|
+
|
|
|
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
|
|
|
- unsigned long end, unsigned long pfn,
|
|
|
+ unsigned long end, phys_addr_t phys,
|
|
|
pgprot_t prot,
|
|
|
void *(*alloc)(unsigned long size))
|
|
|
{
|
|
|
pte_t *pte;
|
|
|
+ unsigned long next;
|
|
|
|
|
|
if (pmd_none(*pmd) || pmd_sect(*pmd)) {
|
|
|
pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
|
|
@@ -105,9 +141,27 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
|
|
|
|
|
|
pte = pte_offset_kernel(pmd, addr);
|
|
|
do {
|
|
|
- set_pte(pte, pfn_pte(pfn, prot));
|
|
|
- pfn++;
|
|
|
- } while (pte++, addr += PAGE_SIZE, addr != end);
|
|
|
+ next = min(end, (addr + CONT_SIZE) & CONT_MASK);
|
|
|
+ if (((addr | next | phys) & ~CONT_MASK) == 0) {
|
|
|
+ /* a block of CONT_PTES */
|
|
|
+ __populate_init_pte(pte, addr, next, phys,
|
|
|
+ prot | __pgprot(PTE_CONT));
|
|
|
+ } else {
|
|
|
+ /*
|
|
|
+ * If the range being split is already inside of a
|
|
|
+ * contiguous range but this PTE isn't going to be
|
|
|
+ * contiguous, then we want to unmark the adjacent
|
|
|
+ * ranges, then update the portion of the range we
|
|
|
+ * are interested in.
|
|
|
+ */
|
|
|
+ clear_cont_pte_range(pte, addr);
|
|
|
+ __populate_init_pte(pte, addr, next, phys, prot);
|
|
|
+ }
|
|
|
+
|
|
|
+ pte += (next - addr) >> PAGE_SHIFT;
|
|
|
+ phys += next - addr;
|
|
|
+ addr = next;
|
|
|
+ } while (addr != end);
|
|
|
}
|
|
|
|
|
|
void split_pud(pud_t *old_pud, pmd_t *pmd)
|
|
@@ -168,8 +222,7 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
|
|
|
}
|
|
|
}
|
|
|
} else {
|
|
|
- alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
|
|
|
- prot, alloc);
|
|
|
+ alloc_init_pte(pmd, addr, next, phys, prot, alloc);
|
|
|
}
|
|
|
phys += next - addr;
|
|
|
} while (pmd++, addr = next, addr != end);
|