@@ -311,11 +311,6 @@ static int is_large_pte(u64 pte)
 	return pte & PT_PAGE_SIZE_MASK;
 }
 
-static int is_rmap_spte(u64 pte)
-{
-	return is_shadow_present_pte(pte);
-}
-
 static int is_last_spte(u64 pte, int level)
 {
 	if (level == PT_PAGE_TABLE_LEVEL)
@@ -540,7 +535,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	u64 old_spte = *sptep;
 	bool ret = false;
 
-	WARN_ON(!is_rmap_spte(new_spte));
+	WARN_ON(!is_shadow_present_pte(new_spte));
 
 	if (!is_shadow_present_pte(old_spte)) {
 		mmu_spte_set(sptep, new_spte);
@@ -595,7 +590,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 	else
 		old_spte = __update_clear_spte_slow(sptep, 0ull);
 
-	if (!is_rmap_spte(old_spte))
+	if (!is_shadow_present_pte(old_spte))
 		return 0;
 
 	pfn = spte_to_pfn(old_spte);
@@ -909,36 +904,35 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
 }
 
 /*
- * Pte mapping structures:
+ * About rmap_head encoding:
  *
- * If pte_list bit zero is zero, then pte_list point to the spte.
- *
- * If pte_list bit zero is one, (then pte_list & ~1) points to a struct
+ * If the bit zero of rmap_head->val is clear, then it points to the only spte
+ * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
  * pte_list_desc containing more mappings.
- *
- * Returns the number of pte entries before the spte was added or zero if
- * the spte was not added.
- *
+ */
+
+/*
+ * Returns the number of pointers in the rmap chain, not counting the new one.
  */
 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
-			unsigned long *pte_list)
+			struct kvm_rmap_head *rmap_head)
 {
 	struct pte_list_desc *desc;
 	int i, count = 0;
 
-	if (!*pte_list) {
+	if (!rmap_head->val) {
 		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
-		*pte_list = (unsigned long)spte;
-	} else if (!(*pte_list & 1)) {
+		rmap_head->val = (unsigned long)spte;
+	} else if (!(rmap_head->val & 1)) {
 		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
 		desc = mmu_alloc_pte_list_desc(vcpu);
-		desc->sptes[0] = (u64 *)*pte_list;
+		desc->sptes[0] = (u64 *)rmap_head->val;
 		desc->sptes[1] = spte;
-		*pte_list = (unsigned long)desc | 1;
+		rmap_head->val = (unsigned long)desc | 1;
 		++count;
 	} else {
 		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
-		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
 			desc = desc->more;
 			count += PTE_LIST_EXT;
@@ -955,8 +949,9 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
 }
 
 static void
-pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
-			   int i, struct pte_list_desc *prev_desc)
+pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
+			   struct pte_list_desc *desc, int i,
+			   struct pte_list_desc *prev_desc)
 {
 	int j;
 
@@ -967,43 +962,43 @@ pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
 	if (j != 0)
 		return;
 	if (!prev_desc && !desc->more)
-		*pte_list = (unsigned long)desc->sptes[0];
+		rmap_head->val = (unsigned long)desc->sptes[0];
 	else
 		if (prev_desc)
 			prev_desc->more = desc->more;
 		else
-			*pte_list = (unsigned long)desc->more | 1;
+			rmap_head->val = (unsigned long)desc->more | 1;
 	mmu_free_pte_list_desc(desc);
 }
 
-static void pte_list_remove(u64 *spte, unsigned long *pte_list)
+static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
 {
 	struct pte_list_desc *desc;
 	struct pte_list_desc *prev_desc;
 	int i;
 
-	if (!*pte_list) {
+	if (!rmap_head->val) {
 		printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
 		BUG();
-	} else if (!(*pte_list & 1)) {
+	} else if (!(rmap_head->val & 1)) {
 		rmap_printk("pte_list_remove: %p 1->0\n", spte);
-		if ((u64 *)*pte_list != spte) {
+		if ((u64 *)rmap_head->val != spte) {
 			printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
 			BUG();
 		}
-		*pte_list = 0;
+		rmap_head->val = 0;
 	} else {
 		rmap_printk("pte_list_remove: %p many->many\n", spte);
-		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 		prev_desc = NULL;
 		while (desc) {
-			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
+			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
 				if (desc->sptes[i] == spte) {
-					pte_list_desc_remove_entry(pte_list,
-								   desc, i,
-								   prev_desc);
+					pte_list_desc_remove_entry(rmap_head,
+							desc, i, prev_desc);
 					return;
 				}
+			}
 			prev_desc = desc;
 			desc = desc->more;
 		}
@@ -1012,28 +1007,8 @@ static void pte_list_remove(u64 *spte, unsigned long *pte_list)
 	}
 }
 
-typedef void (*pte_list_walk_fn) (u64 *spte);
-static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
-{
-	struct pte_list_desc *desc;
-	int i;
-
-	if (!*pte_list)
-		return;
-
-	if (!(*pte_list & 1))
-		return fn((u64 *)*pte_list);
-
-	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
-	while (desc) {
-		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
-			fn(desc->sptes[i]);
-		desc = desc->more;
-	}
-}
-
-static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
-				    struct kvm_memory_slot *slot)
+static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
+					   struct kvm_memory_slot *slot)
 {
 	unsigned long idx;
 
@@ -1041,10 +1016,8 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
 }
 
-/*
- * Take gfn and return the reverse mapping to it.
- */
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp)
+static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
+					 struct kvm_mmu_page *sp)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
@@ -1065,24 +1038,24 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 
 	sp = page_header(__pa(spte));
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
-	return pte_list_add(vcpu, spte, rmapp);
+	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
+	return pte_list_add(vcpu, spte, rmap_head);
 }
 
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
 	struct kvm_mmu_page *sp;
 	gfn_t gfn;
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 
 	sp = page_header(__pa(spte));
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmapp = gfn_to_rmap(kvm, gfn, sp);
-	pte_list_remove(spte, rmapp);
+	rmap_head = gfn_to_rmap(kvm, gfn, sp);
+	pte_list_remove(spte, rmap_head);
 }
 
 /*
@@ -1102,19 +1075,26 @@ struct rmap_iterator {
  *
  * Returns sptep if found, NULL otherwise.
  */
-static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
+static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
+			   struct rmap_iterator *iter)
 {
-	if (!rmap)
+	u64 *sptep;
+
+	if (!rmap_head->val)
 		return NULL;
 
-	if (!(rmap & 1)) {
+	if (!(rmap_head->val & 1)) {
 		iter->desc = NULL;
-		return (u64 *)rmap;
+		sptep = (u64 *)rmap_head->val;
+		goto out;
 	}
 
-	iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
+	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 	iter->pos = 0;
-	return iter->desc->sptes[iter->pos];
+	sptep = iter->desc->sptes[iter->pos];
+out:
+	BUG_ON(!is_shadow_present_pte(*sptep));
+	return sptep;
 }
 
 /*
@@ -1124,14 +1104,14 @@ static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
  */
 static u64 *rmap_get_next(struct rmap_iterator *iter)
 {
+	u64 *sptep;
+
 	if (iter->desc) {
 		if (iter->pos < PTE_LIST_EXT - 1) {
-			u64 *sptep;
-
 			++iter->pos;
 			sptep = iter->desc->sptes[iter->pos];
 			if (sptep)
-				return sptep;
+				goto out;
 		}
 
 		iter->desc = iter->desc->more;
@@ -1139,17 +1119,20 @@ static u64 *rmap_get_next(struct rmap_iterator *iter)
 		if (iter->desc) {
 			iter->pos = 0;
 			/* desc->sptes[0] cannot be NULL */
-			return iter->desc->sptes[iter->pos];
+			sptep = iter->desc->sptes[iter->pos];
+			goto out;
 		}
 	}
 
 	return NULL;
+out:
+	BUG_ON(!is_shadow_present_pte(*sptep));
+	return sptep;
 }
 
-#define for_each_rmap_spte(_rmap_, _iter_, _spte_)			    \
-	for (_spte_ = rmap_get_first(*_rmap_, _iter_);			    \
-	     _spte_ && ({BUG_ON(!is_shadow_present_pte(*_spte_)); 1;});	    \
-	     _spte_ = rmap_get_next(_iter_))
+#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
+	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
+	     _spte_; _spte_ = rmap_get_next(_iter_))
 
 static void drop_spte(struct kvm *kvm, u64 *sptep)
 {
@@ -1207,14 +1190,15 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
 	return mmu_spte_update(sptep, spte);
 }
 
-static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
+static bool __rmap_write_protect(struct kvm *kvm,
+				 struct kvm_rmap_head *rmap_head,
 				 bool pt_protect)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 	bool flush = false;
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep)
 		flush |= spte_write_protect(kvm, sptep, pt_protect);
 
 	return flush;
@@ -1231,13 +1215,13 @@ static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep)
 	return mmu_spte_update(sptep, spte);
 }
 
-static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
+static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 	bool flush = false;
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep)
 		flush |= spte_clear_dirty(kvm, sptep);
 
 	return flush;
@@ -1254,13 +1238,13 @@ static bool spte_set_dirty(struct kvm *kvm, u64 *sptep)
 	return mmu_spte_update(sptep, spte);
 }
 
-static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp)
+static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 	bool flush = false;
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep)
 		flush |= spte_set_dirty(kvm, sptep);
 
 	return flush;
@@ -1280,12 +1264,12 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
 				     gfn_t gfn_offset, unsigned long mask)
 {
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 
 	while (mask) {
-		rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
-				      PT_PAGE_TABLE_LEVEL, slot);
-		__rmap_write_protect(kvm, rmapp, false);
+		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+					  PT_PAGE_TABLE_LEVEL, slot);
+		__rmap_write_protect(kvm, rmap_head, false);
 
 		/* clear the first set bit */
 		mask &= mask - 1;
@@ -1305,12 +1289,12 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
 				     gfn_t gfn_offset, unsigned long mask)
 {
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 
 	while (mask) {
-		rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
-				      PT_PAGE_TABLE_LEVEL, slot);
-		__rmap_clear_dirty(kvm, rmapp);
+		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+					  PT_PAGE_TABLE_LEVEL, slot);
+		__rmap_clear_dirty(kvm, rmap_head);
 
 		/* clear the first set bit */
 		mask &= mask - 1;
@@ -1342,28 +1326,27 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
 	struct kvm_memory_slot *slot;
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 	int i;
 	bool write_protected = false;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 
 	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
-		rmapp = __gfn_to_rmap(gfn, i, slot);
-		write_protected |= __rmap_write_protect(vcpu->kvm, rmapp, true);
+		rmap_head = __gfn_to_rmap(gfn, i, slot);
+		write_protected |= __rmap_write_protect(vcpu->kvm, rmap_head, true);
 	}
 
 	return write_protected;
 }
 
-static bool kvm_zap_rmapp(struct kvm *kvm, unsigned long *rmapp)
+static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 	bool flush = false;
 
-	while ((sptep = rmap_get_first(*rmapp, &iter))) {
-		BUG_ON(!(*sptep & PT_PRESENT_MASK));
+	while ((sptep = rmap_get_first(rmap_head, &iter))) {
 		rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
 
 		drop_spte(kvm, sptep);
@@ -1373,14 +1356,14 @@ static bool kvm_zap_rmapp(struct kvm *kvm, unsigned long *rmapp)
 	return flush;
 }
 
-static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
 			   unsigned long data)
 {
-	return kvm_zap_rmapp(kvm, rmapp);
+	return kvm_zap_rmapp(kvm, rmap_head);
 }
 
-static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
 			     unsigned long data)
 {
@@ -1395,7 +1378,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	new_pfn = pte_pfn(*ptep);
 
 restart:
-	for_each_rmap_spte(rmapp, &iter, sptep) {
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
 		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
 			     sptep, *sptep, gfn, level);
 
@@ -1433,11 +1416,11 @@ struct slot_rmap_walk_iterator {
 
 	/* output fields. */
 	gfn_t gfn;
-	unsigned long *rmap;
+	struct kvm_rmap_head *rmap;
 	int level;
 
 	/* private field. */
-	unsigned long *end_rmap;
+	struct kvm_rmap_head *end_rmap;
 };
 
 static void
@@ -1496,7 +1479,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 				unsigned long end,
 				unsigned long data,
 				int (*handler)(struct kvm *kvm,
-					       unsigned long *rmapp,
+					       struct kvm_rmap_head *rmap_head,
 					       struct kvm_memory_slot *slot,
 					       gfn_t gfn,
 					       int level,
@@ -1540,7 +1523,8 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 
 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 			  unsigned long data,
-			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
+			  int (*handler)(struct kvm *kvm,
+					 struct kvm_rmap_head *rmap_head,
 					 struct kvm_memory_slot *slot,
 					 gfn_t gfn, int level,
 					 unsigned long data))
@@ -1563,7 +1547,7 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
 }
 
-static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
 			 unsigned long data)
 {
@@ -1573,18 +1557,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 	BUG_ON(!shadow_accessed_mask);
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
 		if (*sptep & shadow_accessed_mask) {
 			young = 1;
 			clear_bit((ffs(shadow_accessed_mask) - 1),
 				 (unsigned long *)sptep);
 		}
+	}
 
 	trace_kvm_age_page(gfn, level, slot, young);
 	return young;
 }
 
-static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			      struct kvm_memory_slot *slot, gfn_t gfn,
 			      int level, unsigned long data)
 {
@@ -1600,11 +1585,12 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	if (!shadow_accessed_mask)
 		goto out;
 
-	for_each_rmap_spte(rmapp, &iter, sptep)
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
 		if (*sptep & shadow_accessed_mask) {
 			young = 1;
 			break;
 		}
+	}
 out:
 	return young;
 }
@@ -1613,14 +1599,14 @@ out:
 
 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
-	unsigned long *rmapp;
+	struct kvm_rmap_head *rmap_head;
 	struct kvm_mmu_page *sp;
 
 	sp = page_header(__pa(spte));
 
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
+	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
-	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
+	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
 	kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
@@ -1720,8 +1706,7 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
 	mmu_spte_clear_no_track(parent_pte);
 }
 
-static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
-					       u64 *parent_pte, int direct)
+static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
 {
 	struct kvm_mmu_page *sp;
 
@@ -1737,8 +1722,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	 * this feature. See the comments in kvm_zap_obsolete_pages().
 	 */
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
-	sp->parent_ptes = 0;
-	mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
 	return sp;
 }
@@ -1746,7 +1729,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 static void mark_unsync(u64 *spte);
 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
 {
-	pte_list_walk(&sp->parent_ptes, mark_unsync);
+	u64 *sptep;
+	struct rmap_iterator iter;
+
+	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
+		mark_unsync(sptep);
+	}
 }
 
 static void mark_unsync(u64 *spte)
@@ -1806,6 +1794,13 @@ static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
 	return (pvec->nr == KVM_PAGE_ARRAY_NR);
 }
 
+static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
+{
+	--sp->unsync_children;
+	WARN_ON((int)sp->unsync_children < 0);
+	__clear_bit(idx, sp->unsync_child_bitmap);
+}
+
 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 			     struct kvm_mmu_pages *pvec)
 {
@@ -1815,8 +1810,10 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 		struct kvm_mmu_page *child;
 		u64 ent = sp->spt[i];
 
-		if (!is_shadow_present_pte(ent) || is_large_pte(ent))
-			goto clear_child_bitmap;
+		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
+			clear_unsync_child_bit(sp, i);
+			continue;
+		}
 
 		child = page_header(ent & PT64_BASE_ADDR_MASK);
 
@@ -1825,28 +1822,21 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 				return -ENOSPC;
 
 			ret = __mmu_unsync_walk(child, pvec);
-			if (!ret)
-				goto clear_child_bitmap;
-			else if (ret > 0)
+			if (!ret) {
+				clear_unsync_child_bit(sp, i);
+				continue;
+			} else if (ret > 0) {
 				nr_unsync_leaf += ret;
-			else
+			} else
 				return ret;
 		} else if (child->unsync) {
 			nr_unsync_leaf++;
 			if (mmu_pages_add(pvec, child, i))
 				return -ENOSPC;
 		} else
-			goto clear_child_bitmap;
-
-		continue;
-
-clear_child_bitmap:
-		__clear_bit(i, sp->unsync_child_bitmap);
-		sp->unsync_children--;
-		WARN_ON((int)sp->unsync_children < 0);
+			clear_unsync_child_bit(sp, i);
 	}
 
-
 	return nr_unsync_leaf;
 }
 
@@ -2009,9 +1999,7 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
 		if (!sp)
 			return;
 
-		--sp->unsync_children;
-		WARN_ON((int)sp->unsync_children < 0);
-		__clear_bit(idx, sp->unsync_child_bitmap);
+		clear_unsync_child_bit(sp, idx);
 		level++;
 	} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
 }
@@ -2053,14 +2041,6 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 	}
 }
 
-static void init_shadow_page_table(struct kvm_mmu_page *sp)
-{
-	int i;
-
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
-		sp->spt[i] = 0ull;
-}
-
 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
 {
 	sp->write_flooding_count = 0;
@@ -2083,8 +2063,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gva_t gaddr,
 					     unsigned level,
 					     int direct,
-					     unsigned access,
-					     u64 *parent_pte)
+					     unsigned access)
 {
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
@@ -2116,21 +2095,18 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
 			break;
 
-		mmu_page_add_parent_pte(vcpu, sp, parent_pte);
-		if (sp->unsync_children) {
+		if (sp->unsync_children)
 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-			kvm_mmu_mark_parents_unsync(sp);
-		} else if (sp->unsync)
-			kvm_mmu_mark_parents_unsync(sp);
 
 		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
 		return sp;
 	}
+
 	++vcpu->kvm->stat.mmu_cache_miss;
-	sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
-	if (!sp)
-		return sp;
+
+	sp = kvm_mmu_alloc_page(vcpu, direct);
+
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link,
@@ -2144,7 +2120,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		account_shadowed(vcpu->kvm, sp);
 	}
 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
-	init_shadow_page_table(sp);
+	clear_page(sp->spt);
 	trace_kvm_mmu_get_page(sp, true);
 	return sp;
 }
}
|
|
@@ -2198,7 +2174,8 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
|
|
|
return __shadow_walk_next(iterator, *iterator->sptep);
|
|
|
}
|
|
|
|
|
|
-static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed)
|
|
|
+static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
|
|
|
+ struct kvm_mmu_page *sp)
|
|
|
{
|
|
|
u64 spte;
|
|
|
|
|
@@ -2206,12 +2183,14 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed)
|
|
|
VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
|
|
|
|
|
|
spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
|
|
|
- shadow_user_mask | shadow_x_mask;
|
|
|
-
|
|
|
- if (accessed)
|
|
|
- spte |= shadow_accessed_mask;
|
|
|
+ shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
|
|
|
|
|
|
mmu_spte_set(sptep, spte);
|
|
|
+
|
|
|
+ mmu_page_add_parent_pte(vcpu, sp, sptep);
|
|
|
+
|
|
|
+ if (sp->unsync_children || sp->unsync)
|
|
|
+ mark_unsync(sptep);
|
|
|
}
|
|
|
|
|
|
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
|
|
@@ -2270,17 +2249,12 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
|
|
|
mmu_page_zap_pte(kvm, sp, sp->spt + i);
|
|
|
}
|
|
|
|
|
|
-static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
|
|
|
-{
|
|
|
- mmu_page_remove_parent_pte(sp, parent_pte);
|
|
|
-}
|
|
|
-
|
|
|
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
|
|
|
{
|
|
|
u64 *sptep;
|
|
|
struct rmap_iterator iter;
|
|
|
|
|
|
- while ((sptep = rmap_get_first(sp->parent_ptes, &iter)))
|
|
|
+ while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
|
|
|
drop_parent_pte(sp, sptep);
|
|
|
}
|
|
|
|
|
@@ -2564,18 +2538,18 @@ done:
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
|
|
|
- unsigned pte_access, int write_fault, int *emulate,
|
|
|
- int level, gfn_t gfn, pfn_t pfn, bool speculative,
|
|
|
- bool host_writable)
|
|
|
+static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
|
|
|
+ int write_fault, int level, gfn_t gfn, pfn_t pfn,
|
|
|
+ bool speculative, bool host_writable)
|
|
|
{
|
|
|
int was_rmapped = 0;
|
|
|
int rmap_count;
|
|
|
+ bool emulate = false;
|
|
|
|
|
|
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
|
|
|
*sptep, write_fault, gfn);
|
|
|
|
|
|
- if (is_rmap_spte(*sptep)) {
|
|
|
+ if (is_shadow_present_pte(*sptep)) {
|
|
|
/*
|
|
|
* If we overwrite a PTE page pointer with a 2MB PMD, unlink
|
|
|
* the parent of the now unreachable PTE.
|
|
@@ -2600,12 +2574,12 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
|
|
|
if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
|
|
|
true, host_writable)) {
|
|
|
if (write_fault)
|
|
|
- *emulate = 1;
|
|
|
+ emulate = true;
|
|
|
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
|
|
|
}
|
|
|
|
|
|
- if (unlikely(is_mmio_spte(*sptep) && emulate))
|
|
|
- *emulate = 1;
|
|
|
+ if (unlikely(is_mmio_spte(*sptep)))
|
|
|
+ emulate = true;
|
|
|
|
|
|
pgprintk("%s: setting spte %llx\n", __func__, *sptep);
|
|
|
pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
|
|
@@ -2624,6 +2598,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
|
|
|
}
|
|
|
|
|
|
kvm_release_pfn_clean(pfn);
|
|
|
+
|
|
|
+ return emulate;
|
|
|
}
|
|
|
|
|
|
static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
|
|
@@ -2658,9 +2634,8 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 		return -1;
 
 	for (i = 0; i < ret; i++, gfn++, start++)
-		mmu_set_spte(vcpu, start, access, 0, NULL,
-			     sp->role.level, gfn, page_to_pfn(pages[i]),
-			     true, true);
+		mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
+			     page_to_pfn(pages[i]), true, true);
 
 	return 0;
 }
@@ -2708,9 +2683,8 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 	__direct_pte_prefetch(vcpu, sp, sptep);
 }
 
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int map_writable, int level, gfn_t gfn, pfn_t pfn,
-			bool prefault)
+static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
+			int level, gfn_t gfn, pfn_t pfn, bool prefault)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
@@ -2722,9 +2696,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 		if (iterator.level == level) {
-			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
-				     write, &emulate, level, gfn, pfn,
-				     prefault, map_writable);
+			emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
+					       write, level, gfn, pfn, prefault,
+					       map_writable);
 			direct_pte_prefetch(vcpu, iterator.sptep);
 			++vcpu->stat.pf_fixed;
 			break;
@@ -2737,10 +2711,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
 			pseudo_gfn = base_addr >> PAGE_SHIFT;
 			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
-					      iterator.level - 1,
-					      1, ACC_ALL, iterator.sptep);
+					      iterator.level - 1, 1, ACC_ALL);
 
-			link_shadow_page(iterator.sptep, sp, true);
+			link_shadow_page(vcpu, iterator.sptep, sp);
 		}
 	}
 	return emulate;
@@ -2919,7 +2892,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	 * If the mapping has been changed, let the vcpu fault on the
 	 * same address again.
 	 */
-	if (!is_rmap_spte(spte)) {
+	if (!is_shadow_present_pte(spte)) {
 		ret = true;
 		goto exit;
 	}
@@ -3018,11 +2991,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	make_mmu_pages_available(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
-	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
-			 prefault);
+	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
-
 	return r;
 
 out_unlock:
@@ -3097,8 +3068,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		spin_lock(&vcpu->kvm->mmu_lock);
 		make_mmu_pages_available(vcpu);
-		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
-				      1, ACC_ALL, NULL);
+		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL);
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
@@ -3110,9 +3080,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 			spin_lock(&vcpu->kvm->mmu_lock);
 			make_mmu_pages_available(vcpu);
 			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
-					      i << 30,
-					      PT32_ROOT_LEVEL, 1, ACC_ALL,
-					      NULL);
+					      i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
 			root = __pa(sp->spt);
 			++sp->root_count;
 			spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3149,7 +3117,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		spin_lock(&vcpu->kvm->mmu_lock);
 		make_mmu_pages_available(vcpu);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
-				      0, ACC_ALL, NULL);
+				      0, ACC_ALL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3182,9 +3150,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		}
 		spin_lock(&vcpu->kvm->mmu_lock);
 		make_mmu_pages_available(vcpu);
-		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
-				      PT32_ROOT_LEVEL, 0,
-				      ACC_ALL, NULL);
+		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
+				      0, ACC_ALL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3531,8 +3498,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	make_mmu_pages_available(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
-	r = __direct_map(vcpu, gpa, write, map_writable,
-			 level, gfn, pfn, prefault);
+	r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
@@ -4058,10 +4024,12 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	g_context->inject_page_fault = kvm_inject_page_fault;
 
 	/*
-	 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
-	 * translation of l2_gpa to l1_gpa addresses is done using the
-	 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
-	 * functions between mmu and nested_mmu are swapped.
+	 * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using
+	 * L1's nested page tables (e.g. EPT12). The nested translation
+	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
+	 * L2's page tables as the first level of translation and L1's
+	 * nested page tables as the second level of translation. Basically
+	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
 	 */
 	if (!is_paging(vcpu)) {
 		g_context->nx = false;
@@ -4495,7 +4463,7 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 }
 
 /* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap);
+typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
 
 /* The caller should hold mmu-lock before calling this function. */
 static bool
@@ -4589,9 +4557,10 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static bool slot_rmap_write_protect(struct kvm *kvm, unsigned long *rmapp)
+static bool slot_rmap_write_protect(struct kvm *kvm,
+				    struct kvm_rmap_head *rmap_head)
 {
-	return __rmap_write_protect(kvm, rmapp, false);
+	return __rmap_write_protect(kvm, rmap_head, false);
 }
 
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
@@ -4627,7 +4596,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 }
 
 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
-					 unsigned long *rmapp)
+					 struct kvm_rmap_head *rmap_head)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -4636,7 +4605,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 	struct kvm_mmu_page *sp;
 
 restart:
-	for_each_rmap_spte(rmapp, &iter, sptep) {
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
 		sp = page_header(__pa(sptep));
 		pfn = spte_to_pfn(*sptep);