@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 	pte_t pte;
 	unsigned long pfn;
 	struct page *page;
+	unsigned char dummy;
 
 	ptep = lookup_address((unsigned long)v, &level);
 	BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 
 	pte = pfn_pte(pfn, prot);
 
+	/*
+	 * Careful: update_va_mapping() will fail if the virtual address
+	 * we're poking isn't populated in the page tables. We don't
+	 * need to worry about the direct map (that's always in the page
+	 * tables), but we need to be careful about vmap space. In
+	 * particular, the top level page table can lazily propagate
+	 * entries between processes, so if we've switched mms since we
+	 * vmapped the target in the first place, we might not have the
+	 * top-level page table entry populated.
+	 *
+	 * We disable preemption because we want the same mm active when
+	 * we probe the target and when we issue the hypercall. We'll
+	 * have the same nominal mm, but if we're a kernel thread, lazy
+	 * mm dropping could change our pgd.
+	 *
+	 * Out of an abundance of caution, this uses __get_user() to fault
+	 * in the target address just in case there's some obscure case
+	 * in which the target address isn't readable.
+	 */
+
+	preempt_disable();
+
+	pagefault_disable();	/* Avoid warnings due to being atomic. */
+	__get_user(dummy, (unsigned char __user __force *)v);
+	pagefault_enable();
+
 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
 		BUG();
 
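The probe added above is a small, self-contained pattern: touch one byte of the target with page faults disabled so the access cannot sleep, while preemption stays off so the same mm (and therefore the same pgd) is in place for both the probe and the hypercall. Below is a minimal sketch of that pattern as a standalone helper; the name probe_alias_target() is invented for illustration, since the patch open-codes the probe rather than adding a helper.

#include <linux/uaccess.h>

/*
 * Illustrative sketch only (the helper name is made up).  Fault in one
 * byte at @v with page faults disabled so the access cannot sleep.
 * Returns 0 if the address was readable, -EFAULT otherwise.  The
 * caller is expected to hold preempt_disable() across both this probe
 * and the following hypercall so the pgd cannot change in between.
 */
static int probe_alias_target(const void *v)
{
	unsigned char dummy;
	int ret;

	pagefault_disable();	/* avoid atomic-context warnings, as in the patch */
	ret = __get_user(dummy, (const unsigned char __user __force *)v);
	pagefault_enable();

	return ret;
}

Note that the patch does not check the probe's return value: its only job is to force any fault (and the resulting top-level page-table fixup) to happen before HYPERVISOR_update_va_mapping() runs.
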
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 				BUG();
 	} else
 		kmap_flush_unused();
+
+	preempt_enable();
 }
 
 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
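
For context on why vmap space matters here at all: a multi-page LDT is normally vmalloc()ed, so the memory that set_aliased_prot() is asked to protect can be reachable both through the direct map and through a vmalloc alias whose top-level page-table entry may not yet be populated in the current pgd. The sketch below shows that allocation decision in isolation; alloc_ldt_storage() is an assumed name, and the sketch is illustrative rather than code from this patch.

#include <linux/gfp.h>
#include <linux/vmalloc.h>

/*
 * Illustrative only: choose backing storage for an LDT of @alloc_size
 * bytes.  A single-page LDT comes straight from the page allocator and
 * is only reachable through the direct map; anything larger is
 * vmalloc()ed and therefore also lives in vmap space, which is the
 * case the comment in set_aliased_prot() is worried about.
 */
static void *alloc_ldt_storage(unsigned int alloc_size)
{
	if (alloc_size > PAGE_SIZE)
		return vzalloc(alloc_size);

	return (void *)get_zeroed_page(GFP_KERNEL);
}
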
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
 	int i;
 
+	/*
+	 * We need to mark the all aliases of the LDT pages RO. We
+	 * don't need to call vm_flush_aliases(), though, since that's
+	 * only responsible for flushing aliases out the TLBs, not the
+	 * page tables, and Xen will flush the TLB for us if needed.
+	 *
+	 * To avoid confusing future readers: none of this is necessary
+	 * to load the LDT. The hypervisor only checks this when the
+	 * LDT is faulted in due to subsequent descriptor access.
+	 */
+
 	for(i = 0; i < entries; i += entries_per_page)
 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }
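
The hunk above only covers the allocation side, where the LDT aliases are made read-only. For completeness, the free-side counterpart in the same file simply walks the same page-sized chunks and restores normal kernel protections; roughly (reconstructed for illustration, not part of this patch):

/*
 * Rough sketch of the free-side counterpart: restore ordinary
 * read-write kernel protections on every alias before the LDT
 * memory is released.
 */
static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}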