@@ -1261,6 +1261,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 		/* Nothing to do */
 		goto out;
 
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		rpte = be64_to_cpu(hptep[1]);
+		vpte = hpte_new_to_old_v(vpte, rpte);
+	}
+
 	/* Unmap */
 	rev = &old->rev[idx];
 	guest_rpte = rev->guest_rpte;
@@ -1290,7 +1295,6 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 
 	/* Reload PTE after unmap */
 	vpte = be64_to_cpu(hptep[0]);
-
 	BUG_ON(vpte & HPTE_V_VALID);
 	BUG_ON(!(vpte & HPTE_V_ABSENT));
 
@@ -1299,6 +1303,12 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 		goto out;
 
 	rpte = be64_to_cpu(hptep[1]);
+
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		vpte = hpte_new_to_old_v(vpte, rpte);
+		rpte = hpte_new_to_old_r(rpte);
+	}
+
 	pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
 	avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
 	pteg = idx / HPTES_PER_GROUP;
@@ -1336,6 +1346,10 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 	new_hptep = (__be64 *)(new->virt + (new_idx << 4));
 
 	replace_vpte = be64_to_cpu(new_hptep[0]);
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		unsigned long replace_rpte = be64_to_cpu(new_hptep[1]);
+		replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte);
+	}
 
 	if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
 		BUG_ON(new->order >= old->order);
@@ -1351,6 +1365,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 		/* Discard the previous HPTE */
 	}
 
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		rpte = hpte_old_to_new_r(vpte, rpte);
+		vpte = hpte_old_to_new_v(vpte);
+	}
+
 	new_hptep[1] = cpu_to_be64(rpte);
 	new->rev[new_idx].guest_rpte = guest_rpte;
 	/* No need for a barrier, since new HPT isn't active */
@@ -1368,12 +1387,6 @@ static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
 	unsigned long i;
 	int rc;
 
-	/*
-	 * resize_hpt_rehash_hpte() doesn't handle the new-format HPTEs
-	 * that POWER9 uses, and could well hit a BUG_ON on POWER9.
-	 */
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		return -EIO;
 	for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
 		rc = resize_hpt_rehash_hpte(resize, i);
 		if (rc != 0)
@@ -1404,6 +1417,9 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
 
 	synchronize_srcu_expedited(&kvm->srcu);
 
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		kvmppc_setup_partition_table(kvm);
+
 	resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
 }
 