@@ -1262,7 +1262,8 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
-			   struct kvm_memory_slot *slot, unsigned long data)
+			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
+			   unsigned long data)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1270,7 +1271,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 	while ((sptep = rmap_get_first(*rmapp, &iter))) {
 		BUG_ON(!(*sptep & PT_PRESENT_MASK));
-		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep);
+		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx gfn %llx (%d)\n",
+			     sptep, *sptep, gfn, level);
 
 		drop_spte(kvm, sptep);
 		need_tlb_flush = 1;
@@ -1280,7 +1282,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 }
 
 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
-			     struct kvm_memory_slot *slot, unsigned long data)
+			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
+			     unsigned long data)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1294,7 +1297,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
 		BUG_ON(!is_shadow_present_pte(*sptep));
-		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);
+		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
+			     sptep, *sptep, gfn, level);
 
 		need_flush = 1;
 
@@ -1328,6 +1332,8 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 				int (*handler)(struct kvm *kvm,
 					       unsigned long *rmapp,
 					       struct kvm_memory_slot *slot,
+					       gfn_t gfn,
+					       int level,
 					       unsigned long data))
 {
 	int j;
@@ -1357,6 +1363,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 		     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
 			unsigned long idx, idx_end;
 			unsigned long *rmapp;
+			gfn_t gfn = gfn_start;
 
 			/*
 			 * {idx(page_j) | page_j intersects with
@@ -1367,8 +1374,10 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 
 			rmapp = __gfn_to_rmap(gfn_start, j, memslot);
 
-			for (; idx <= idx_end; ++idx)
-				ret |= handler(kvm, rmapp++, memslot, data);
+			for (; idx <= idx_end;
+			       ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j)))
+				ret |= handler(kvm, rmapp++, memslot,
+					       gfn, j, data);
 		}
 	}
 
@@ -1379,6 +1388,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 			  unsigned long data,
 			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
 					 struct kvm_memory_slot *slot,
+					 gfn_t gfn, int level,
 					 unsigned long data))
 {
 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
@@ -1400,7 +1410,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 }
 
 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
-			 struct kvm_memory_slot *slot, unsigned long data)
+			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
+			 unsigned long data)
 {
 	u64 *sptep;
 	struct rmap_iterator uninitialized_var(iter);
@@ -1415,7 +1426,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	 * out actively used pages or breaking up actively used hugepages.
 	 */
 	if (!shadow_accessed_mask) {
-		young = kvm_unmap_rmapp(kvm, rmapp, slot, data);
+		young = kvm_unmap_rmapp(kvm, rmapp, slot, gfn, level, data);
 		goto out;
 	}
 
@@ -1430,13 +1441,13 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		}
 	}
 out:
-	/* @data has hva passed to kvm_age_hva(). */
-	trace_kvm_age_page(data, slot, young);
+	trace_kvm_age_page(gfn, level, slot, young);
 	return young;
 }
 
 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
-			      struct kvm_memory_slot *slot, unsigned long data)
+			      struct kvm_memory_slot *slot, gfn_t gfn,
+			      int level, unsigned long data)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1474,13 +1485,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 
-	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, 0);
+	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
 	kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvm_handle_hva(kvm, hva, hva, kvm_age_rmapp);
+	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)