@@ -40,6 +40,7 @@
  */
 #include <linux/sched.h>
 #include <linux/highmem.h>
+#include <linux/debugfs.h>
 #include <linux/bug.h>
 
 #include <asm/pgtable.h>
@@ -57,6 +58,61 @@
 
 #include "multicalls.h"
 #include "mmu.h"
+#include "debugfs.h"
+
+#define MMU_UPDATE_HISTO	30
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct {
+	u32 pgd_update;
+	u32 pgd_update_pinned;
+	u32 pgd_update_batched;
+
+	u32 pud_update;
+	u32 pud_update_pinned;
+	u32 pud_update_batched;
+
+	u32 pmd_update;
+	u32 pmd_update_pinned;
+	u32 pmd_update_batched;
+
+	u32 pte_update;
+	u32 pte_update_pinned;
+	u32 pte_update_batched;
+
+	u32 mmu_update;
+	u32 mmu_update_extended;
+	u32 mmu_update_histo[MMU_UPDATE_HISTO];
+
+	u32 prot_commit;
+	u32 prot_commit_batched;
+
+	u32 set_pte_at;
+	u32 set_pte_at_batched;
+	u32 set_pte_at_pinned;
+	u32 set_pte_at_current;
+	u32 set_pte_at_kernel;
+} mmu_stats;
+
+static u8 zero_stats;
+
+static inline void check_zero(void)
+{
+	if (unlikely(zero_stats)) {
+		memset(&mmu_stats, 0, sizeof(mmu_stats));
+		zero_stats = 0;
+	}
+}
+
+#define ADD_STATS(elem, val)			\
+	do { check_zero(); mmu_stats.elem += (val); } while(0)
+
+#else  /* !CONFIG_XEN_DEBUG_FS */
+
+#define ADD_STATS(elem, val)	do { (void)(val); } while(0)
+
+#endif /* CONFIG_XEN_DEBUG_FS */
 
 /*
  * Just beyond the highest usermode address. STACK_TOP_MAX has a
@@ -229,25 +285,35 @@ void make_lowmem_page_readwrite(void *vaddr)
 }
 
 
-static bool page_pinned(void *ptr)
+static bool xen_page_pinned(void *ptr)
 {
 	struct page *page = virt_to_page(ptr);
 
 	return PagePinned(page);
 }
 
-static void extend_mmu_update(const struct mmu_update *update)
+static void xen_extend_mmu_update(const struct mmu_update *update)
 {
 	struct multicall_space mcs;
 	struct mmu_update *u;
 
 	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
-	if (mcs.mc != NULL)
+
+	if (mcs.mc != NULL) {
+		ADD_STATS(mmu_update_extended, 1);
+		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
+
 		mcs.mc->args[1]++;
-	else {
+
+		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
+			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
+		else
+			ADD_STATS(mmu_update_histo[0], 1);
+	} else {
+		ADD_STATS(mmu_update, 1);
 		mcs = __xen_mc_entry(sizeof(*u));
 		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+		ADD_STATS(mmu_update_histo[1], 1);
 	}
 
 	u = mcs.args;
@@ -265,7 +331,9 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 	/* ptr may be ioremapped for 64-bit pagetable setup */
 	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
 	u.val = pmd_val_ma(val);
-	extend_mmu_update(&u);
+	xen_extend_mmu_update(&u);
+
+	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 
@@ -274,13 +342,17 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 
 void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
+	ADD_STATS(pmd_update, 1);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
-	if (!page_pinned(ptr)) {
+	if (!xen_page_pinned(ptr)) {
 		*ptr = val;
 		return;
 	}
 
+	ADD_STATS(pmd_update_pinned, 1);
+
 	xen_set_pmd_hyper(ptr, val);
 }
 
@@ -300,12 +372,18 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (mm == &init_mm)
 		preempt_disable();
 
+	ADD_STATS(set_pte_at, 1);
+//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
+	ADD_STATS(set_pte_at_current, mm == current->mm);
+	ADD_STATS(set_pte_at_kernel, mm == &init_mm);
+
 	if (mm == current->mm || mm == &init_mm) {
 		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
 			struct multicall_space mcs;
 			mcs = xen_mc_entry(0);
 
 			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
+			ADD_STATS(set_pte_at_batched, 1);
 			xen_mc_issue(PARAVIRT_LAZY_MMU);
 			goto out;
 		} else
@@ -334,7 +412,10 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
 	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
 	u.val = pte_val_ma(pte);
-	extend_mmu_update(&u);
+	xen_extend_mmu_update(&u);
+
+	ADD_STATS(prot_commit, 1);
+	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
@@ -400,7 +481,9 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 	/* ptr may be ioremapped for 64-bit pagetable setup */
 	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
 	u.val = pud_val_ma(val);
-	extend_mmu_update(&u);
+	xen_extend_mmu_update(&u);
+
+	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 
@@ -409,18 +492,26 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 
 void xen_set_pud(pud_t *ptr, pud_t val)
 {
+	ADD_STATS(pud_update, 1);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
-	if (!page_pinned(ptr)) {
+	if (!xen_page_pinned(ptr)) {
 		*ptr = val;
 		return;
 	}
 
+	ADD_STATS(pud_update_pinned, 1);
+
 	xen_set_pud_hyper(ptr, val);
 }
 
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
+	ADD_STATS(pte_update, 1);
+//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
+	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 #ifdef CONFIG_X86_PAE
 	ptep->pte_high = pte.pte_high;
 	smp_wmb();
@@ -490,7 +581,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
 
 	u.ptr = virt_to_machine(ptr).maddr;
 	u.val = pgd_val_ma(val);
-	extend_mmu_update(&u);
+	xen_extend_mmu_update(&u);
 }
 
 /*
@@ -517,17 +608,22 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
+	ADD_STATS(pgd_update, 1);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
-	if (!page_pinned(ptr)) {
+	if (!xen_page_pinned(ptr)) {
 		*ptr = val;
 		if (user_ptr) {
-			WARN_ON(page_pinned(user_ptr));
+			WARN_ON(xen_page_pinned(user_ptr));
 			*user_ptr = val;
 		}
 		return;
 	}
 
+	ADD_STATS(pgd_update_pinned, 1);
+	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 	/* If it's pinned, then we can at least batch the kernel and
 	   user updates together. */
 	xen_mc_batch();
@@ -555,9 +651,12 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
  * For 64-bit, we must skip the Xen hole in the middle of the address
  * space, just after the big x86-64 virtual hole.
  */
-static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
-		    unsigned long limit)
+static int xen_pgd_walk(struct mm_struct *mm,
+			int (*func)(struct mm_struct *mm, struct page *,
+				    enum pt_level),
+			unsigned long limit)
 {
+	pgd_t *pgd = mm->pgd;
 	int flush = 0;
 	unsigned hole_low, hole_high;
 	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
@@ -590,8 +689,6 @@ static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
 	pmdidx_limit = 0;
 #endif
 
-	flush |= (*func)(virt_to_page(pgd), PT_PGD);
-
 	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
 		pud_t *pud;
 
@@ -604,7 +701,7 @@ static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
 		pud = pud_offset(&pgd[pgdidx], 0);
 
 		if (PTRS_PER_PUD > 1) /* not folded */
-			flush |= (*func)(virt_to_page(pud), PT_PUD);
+			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
 
 		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
 			pmd_t *pmd;
@@ -619,7 +716,7 @@ static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
 			pmd = pmd_offset(&pud[pudidx], 0);
 
 			if (PTRS_PER_PMD > 1) /* not folded */
-				flush |= (*func)(virt_to_page(pmd), PT_PMD);
+				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
 
 			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
 				struct page *pte;
@@ -633,28 +730,34 @@ static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
 					continue;
 
 			pte = pmd_page(pmd[pmdidx]);
-			flush |= (*func)(pte, PT_PTE);
+			flush |= (*func)(mm, pte, PT_PTE);
 		}
 	}
 }
+
 out:
+	/* Do the top level last, so that the callbacks can use it as
+	   a cue to do final things like tlb flushes. */
+	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
 
 	return flush;
 }
 
-static spinlock_t *lock_pte(struct page *page)
+/* If we're using split pte locks, then take the page's lock and
+   return a pointer to it.  Otherwise return NULL. */
+static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
 {
 	spinlock_t *ptl = NULL;
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 	ptl = __pte_lockptr(page);
-	spin_lock(ptl);
+	spin_lock_nest_lock(ptl, &mm->page_table_lock);
 #endif
 
 	return ptl;
 }
 
-static void do_unlock(void *v)
+static void xen_pte_unlock(void *v)
 {
 	spinlock_t *ptl = v;
 	spin_unlock(ptl);
@@ -672,7 +775,8 @@ static void xen_do_pin(unsigned level, unsigned long pfn)
 		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 }
 
-static int pin_page(struct page *page, enum pt_level level)
+static int xen_pin_page(struct mm_struct *mm, struct page *page,
+			enum pt_level level)
 {
 	unsigned pgfl = TestSetPagePinned(page);
 	int flush;
@@ -691,21 +795,40 @@ static int pin_page(struct page *page, enum pt_level level)
 
 		flush = 0;
 
+		/*
+		 * We need to hold the pagetable lock between the time
+		 * we make the pagetable RO and when we actually pin
+		 * it.  If we don't, then other users may come in and
+		 * attempt to update the pagetable by writing it,
+		 * which will fail because the memory is RO but not
+		 * pinned, so Xen won't do the trap'n'emulate.
+		 *
+		 * If we're using split pte locks, we can't hold the
+		 * entire pagetable's worth of locks during the
+		 * traverse, because we may wrap the preempt count (8
+		 * bits).  The solution is to mark RO and pin each PTE
+		 * page while holding the lock.  This means the number
+		 * of locks we end up holding is never more than a
+		 * batch size (~32 entries, at present).
+		 *
+		 * If we're not using split pte locks, we needn't pin
+		 * the PTE pages independently, because we're
+		 * protected by the overall pagetable lock.
+		 */
 		ptl = NULL;
 		if (level == PT_PTE)
-			ptl = lock_pte(page);
+			ptl = xen_pte_lock(page, mm);
 
 		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
 					pfn_pte(pfn, PAGE_KERNEL_RO),
 					level == PT_PGD ? UVMF_TLB_FLUSH : 0);
 
-		if (level == PT_PTE)
+		if (ptl) {
 			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
 
-		if (ptl) {
 			/* Queue a deferred unlock for when this batch
 			   is completed. */
-			xen_mc_callback(do_unlock, ptl);
+			xen_mc_callback(xen_pte_unlock, ptl);
 		}
 	}
@@ -715,11 +838,11 @@ static int pin_page(struct page *page, enum pt_level level)
 /* This is called just after a mm has been created, but it has not
    been used yet. We need to make sure that its pagetable is all
    read-only, and can be pinned. */
-void xen_pgd_pin(pgd_t *pgd)
+static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
 	xen_mc_batch();
 
-	if (pgd_walk(pgd, pin_page, USER_LIMIT)) {
+	if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
 		/* re-enable interrupts for kmap_flush_unused */
 		xen_mc_issue(0);
 		kmap_flush_unused();
@@ -733,25 +856,35 @@ void xen_pgd_pin(pgd_t *pgd)
 		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
 
 		if (user_pgd) {
-			pin_page(virt_to_page(user_pgd), PT_PGD);
+			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
 			xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
 		}
 	}
 #else /* CONFIG_X86_32 */
 #ifdef CONFIG_X86_PAE
 	/* Need to make sure unshared kernel PMD is pinnable */
-	pin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
+	xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+		     PT_PMD);
 #endif
 	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
 #endif /* CONFIG_X86_64 */
 	xen_mc_issue(0);
 }
 
+static void xen_pgd_pin(struct mm_struct *mm)
+{
+	__xen_pgd_pin(mm, mm->pgd);
+}
+
 /*
  * On save, we need to pin all pagetables to make sure they get their
  * mfns turned into pfns.  Search the list for any unpinned pgds and pin
  * them (unpinned pgds are not currently in use, probably because the
  * process is under construction or destruction).
+ *
+ * Expected to be called in stop_machine() ("equivalent to taking
+ * every spinlock in the system"), so the locking doesn't really
+ * matter all that much.
  */
 void xen_mm_pin_all(void)
 {
@@ -762,7 +895,7 @@ void xen_mm_pin_all(void)
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (!PagePinned(page)) {
-			xen_pgd_pin((pgd_t *)page_address(page));
+			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
 			SetPageSavePinned(page);
 		}
 	}
@@ -775,7 +908,8 @@ void xen_mm_pin_all(void)
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
-static __init int mark_pinned(struct page *page, enum pt_level level)
+static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
+				  enum pt_level level)
 {
 	SetPagePinned(page);
 	return 0;
@@ -783,10 +917,11 @@ static __init int mark_pinned(struct page *page, enum pt_level level)
 
 void __init xen_mark_init_mm_pinned(void)
 {
-	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
+	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
 }
 
-static int unpin_page(struct page *page, enum pt_level level)
+static int xen_unpin_page(struct mm_struct *mm, struct page *page,
+			  enum pt_level level)
 {
 	unsigned pgfl = TestClearPagePinned(page);
 
@@ -796,10 +931,18 @@ static int unpin_page(struct page *page, enum pt_level level)
 		spinlock_t *ptl = NULL;
 		struct multicall_space mcs;
 
+		/*
+		 * Do the converse to pin_page.  If we're using split
+		 * pte locks, we must be holding the lock for while
+		 * the pte page is unpinned but still RO to prevent
+		 * concurrent updates from seeing it in this
+		 * partially-pinned state.
+		 */
 		if (level == PT_PTE) {
-			ptl = lock_pte(page);
+			ptl = xen_pte_lock(page, mm);
 
-			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
+			if (ptl)
+				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
 		}
 
 		mcs = __xen_mc_entry(0);
@@ -810,7 +953,7 @@ static int unpin_page(struct page *page, enum pt_level level)
 
 		if (ptl) {
 			/* unlock when batch completed */
-			xen_mc_callback(do_unlock, ptl);
+			xen_mc_callback(xen_pte_unlock, ptl);
 		}
 	}
 
@@ -818,7 +961,7 @@ static int unpin_page(struct page *page, enum pt_level level)
 }
 
 /* Release a pagetables pages back as normal RW */
-static void xen_pgd_unpin(pgd_t *pgd)
+static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 {
 	xen_mc_batch();
 
@@ -830,21 +973,27 @@ static void xen_pgd_unpin(pgd_t *pgd)
 
 		if (user_pgd) {
 			xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
-			unpin_page(virt_to_page(user_pgd), PT_PGD);
+			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
 		}
 	}
 #endif
 
 #ifdef CONFIG_X86_PAE
 	/* Need to make sure unshared kernel PMD is unpinned */
-	pin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
+	xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
+		       PT_PMD);
 #endif
 
-	pgd_walk(pgd, unpin_page, USER_LIMIT);
+	xen_pgd_walk(mm, xen_unpin_page, USER_LIMIT);
 
 	xen_mc_issue(0);
 }
 
+static void xen_pgd_unpin(struct mm_struct *mm)
+{
+	__xen_pgd_unpin(mm, mm->pgd);
+}
+
 /*
  * On resume, undo any pinning done at save, so that the rest of the
  * kernel doesn't see any unexpected pinned pagetables.
@@ -859,7 +1008,7 @@ void xen_mm_unpin_all(void)
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (PageSavePinned(page)) {
 			BUG_ON(!PagePinned(page));
-			xen_pgd_unpin((pgd_t *)page_address(page));
+			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
 			ClearPageSavePinned(page);
 		}
 	}
@@ -870,14 +1019,14 @@ void xen_mm_unpin_all(void)
 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
 	spin_lock(&next->page_table_lock);
-	xen_pgd_pin(next->pgd);
+	xen_pgd_pin(next);
 	spin_unlock(&next->page_table_lock);
 }
 
 void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
 	spin_lock(&mm->page_table_lock);
-	xen_pgd_pin(mm->pgd);
+	xen_pgd_pin(mm);
 	spin_unlock(&mm->page_table_lock);
 }
 
@@ -907,7 +1056,7 @@ static void drop_other_mm_ref(void *info)
 	}
 }
 
-static void drop_mm_ref(struct mm_struct *mm)
+static void xen_drop_mm_ref(struct mm_struct *mm)
 {
 	cpumask_t mask;
 	unsigned cpu;
@@ -937,7 +1086,7 @@ static void drop_mm_ref(struct mm_struct *mm)
 	smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
 }
 #else
-static void drop_mm_ref(struct mm_struct *mm)
+static void xen_drop_mm_ref(struct mm_struct *mm)
 {
 	if (current->active_mm == mm)
 		load_cr3(swapper_pg_dir);
@@ -961,14 +1110,77 @@ static void drop_mm_ref(struct mm_struct *mm)
 void xen_exit_mmap(struct mm_struct *mm)
 {
 	get_cpu();		/* make sure we don't move around */
-	drop_mm_ref(mm);
+	xen_drop_mm_ref(mm);
 	put_cpu();
 
 	spin_lock(&mm->page_table_lock);
 
 	/* pgd may not be pinned in the error exit path of execve */
-	if (page_pinned(mm->pgd))
-		xen_pgd_unpin(mm->pgd);
+	if (xen_page_pinned(mm->pgd))
+		xen_pgd_unpin(mm);
 
 	spin_unlock(&mm->page_table_lock);
 }
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct dentry *d_mmu_debug;
+
+static int __init xen_mmu_debugfs(void)
+{
+	struct dentry *d_xen = xen_init_debugfs();
+
+	if (d_xen == NULL)
+		return -ENOMEM;
+
+	d_mmu_debug = debugfs_create_dir("mmu", d_xen);
+
+	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
+
+	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
+	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
+			   &mmu_stats.pgd_update_pinned);
+	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pgd_update_pinned);
+
+	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
+	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
+			   &mmu_stats.pud_update_pinned);
+	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pud_update_pinned);
+
+	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
+	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
+			   &mmu_stats.pmd_update_pinned);
+	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pmd_update_pinned);
+
+	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
+//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
+//			   &mmu_stats.pte_update_pinned);
+	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pte_update_pinned);
+
+	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
+	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
+			   &mmu_stats.mmu_update_extended);
+	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
+				     mmu_stats.mmu_update_histo, 20);
+
+	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
+	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
+			   &mmu_stats.set_pte_at_batched);
+	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
+			   &mmu_stats.set_pte_at_current);
+	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
+			   &mmu_stats.set_pte_at_kernel);
+
+	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
+	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
+			   &mmu_stats.prot_commit_batched);
+
+	return 0;
+}
+fs_initcall(xen_mmu_debugfs);
+
+#endif /* CONFIG_XEN_DEBUG_FS */