@@ -701,12 +701,13 @@ static void kvmppc_rmap_reset(struct kvm *kvm)
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 }
 
+typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			      unsigned long gfn);
+
 static int kvm_handle_hva_range(struct kvm *kvm,
 				unsigned long start,
 				unsigned long end,
-				int (*handler)(struct kvm *kvm,
-					       unsigned long *rmapp,
-					       unsigned long gfn))
+				hva_handler_fn handler)
 {
 	int ret;
 	int retval = 0;
@@ -731,9 +732,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
 
 		for (; gfn < gfn_end; ++gfn) {
-			gfn_t gfn_offset = gfn - memslot->base_gfn;
-
-			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
+			ret = handler(kvm, memslot, gfn);
 			retval |= ret;
 		}
 	}
@@ -742,20 +741,21 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 }
 
 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
-			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
-					 unsigned long gfn))
+			  hva_handler_fn handler)
 {
 	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
 }
 
-static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			   unsigned long gfn)
 {
 	struct revmap_entry *rev = kvm->arch.revmap;
 	unsigned long h, i, j;
 	__be64 *hptep;
 	unsigned long ptel, psize, rcbits;
+	unsigned long *rmapp;
 
+	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
 	for (;;) {
 		lock_rmap(rmapp);
 		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
@@ -816,26 +816,36 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
 {
-	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+	hva_handler_fn handler;
+
+	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
+	kvm_handle_hva(kvm, hva, handler);
 	return 0;
 }
 
 int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
+	hva_handler_fn handler;
+
+	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
+	kvm_handle_hva_range(kvm, start, end, handler);
 	return 0;
 }
 
 void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
 				  struct kvm_memory_slot *memslot)
 {
-	unsigned long *rmapp;
 	unsigned long gfn;
 	unsigned long n;
+	unsigned long *rmapp;
 
-	rmapp = memslot->arch.rmap;
 	gfn = memslot->base_gfn;
-	for (n = memslot->npages; n; --n) {
+	rmapp = memslot->arch.rmap;
+	for (n = memslot->npages; n; --n, ++gfn) {
+		if (kvm_is_radix(kvm)) {
+			kvm_unmap_radix(kvm, memslot, gfn);
+			continue;
+		}
 		/*
 		 * Testing the present bit without locking is OK because
 		 * the memslot has been marked invalid already, and hence
@@ -843,20 +853,21 @@ void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
 		 * thus the present bit can't go from 0 to 1.
 		 */
 		if (*rmapp & KVMPPC_RMAP_PRESENT)
-			kvm_unmap_rmapp(kvm, rmapp, gfn);
+			kvm_unmap_rmapp(kvm, memslot, gfn);
 		++rmapp;
-		++gfn;
 	}
 }
 
-static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			 unsigned long gfn)
 {
 	struct revmap_entry *rev = kvm->arch.revmap;
 	unsigned long head, i, j;
 	__be64 *hptep;
 	int ret = 0;
+	unsigned long *rmapp;
 
+	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
 retry:
 	lock_rmap(rmapp);
 	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
@@ -904,17 +915,22 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	return kvm_handle_hva_range(kvm, start, end, kvm_age_rmapp);
+	hva_handler_fn handler;
+
+	handler = kvm_is_radix(kvm) ? kvm_age_radix : kvm_age_rmapp;
+	return kvm_handle_hva_range(kvm, start, end, handler);
 }
 
-static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			      unsigned long gfn)
 {
 	struct revmap_entry *rev = kvm->arch.revmap;
 	unsigned long head, i, j;
 	unsigned long *hp;
 	int ret = 1;
+	unsigned long *rmapp;
 
+	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
 	if (*rmapp & KVMPPC_RMAP_REFERENCED)
 		return 1;
 
@@ -940,12 +956,18 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
 {
-	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
+	hva_handler_fn handler;
+
+	handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp;
+	return kvm_handle_hva(kvm, hva, handler);
 }
 
 void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+	hva_handler_fn handler;
+
+	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
+	kvm_handle_hva(kvm, hva, handler);
 }
 
 static int vcpus_running(struct kvm *kvm)
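
The pattern running through the whole patch is a single idea: name the handler signature once with a typedef, pass memslot + gfn instead of a precomputed rmap pointer (so radix handlers, which have no rmap array, can share the same callbacks), and pick the HPT or radix handler at each entry point with one kvm_is_radix() test. A minimal, self-contained sketch of that dispatch idiom follows; the struct layouts and handler bodies below are stand-ins for illustration, not the kernel's, and only the shape of hva_handler_fn comes from the patch:

/* Hypothetical stand-ins; only the typedef's shape mirrors the patch. */
#include <stdio.h>
#include <stdbool.h>

struct kvm { bool radix; };
struct kvm_memory_slot { unsigned long base_gfn; unsigned long npages; };

/* One named signature shared by every MMU-notifier handler. */
typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      unsigned long gfn);

static int unmap_hpt(struct kvm *kvm, struct kvm_memory_slot *memslot,
		     unsigned long gfn)
{
	/* An HPT-style handler would index memslot's rmap by gfn - base_gfn. */
	printf("HPT unmap, slot offset %lu\n", gfn - memslot->base_gfn);
	return 0;
}

static int unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	/* A radix handler walks the guest page table instead; no rmap needed. */
	printf("radix unmap of gfn 0x%lx\n", gfn);
	return 0;
}

int main(void)
{
	struct kvm vm = { .radix = true };
	struct kvm_memory_slot slot = { .base_gfn = 0x1000, .npages = 1 };

	/* Select the handler once per call, as the patched entry points do. */
	hva_handler_fn handler = vm.radix ? unmap_radix : unmap_hpt;
	return handler(&vm, &slot, slot.base_gfn);
}

The typedef earns its keep here: without it, the three-line function-pointer parameter would have to be repeated in both kvm_handle_hva_range() and kvm_handle_hva(), and moving the rmap lookup into the HPT handlers is what lets one signature serve both MMU modes.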