@@ -83,15 +83,11 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 
 	kvm->arch.hpt.virt = hpt;
 	kvm->arch.hpt.order = order;
-	/* HPTEs are 2**4 bytes long */
-	kvm->arch.hpt.npte = 1ul << (order - 4);
-	/* 128 (2**7) bytes in each HPTEG */
-	kvm->arch.hpt.mask = (1ul << (order - 7)) - 1;
 
 	atomic64_set(&kvm->arch.mmio_update, 0);
 
 	/* Allocate reverse map array */
-	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt.npte);
+	rev = vmalloc(sizeof(struct revmap_entry) * kvmppc_hpt_npte(&kvm->arch.hpt));
 	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
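
The kvmppc_hpt_npte() and kvmppc_hpt_mask() accessors used above are not defined in this excerpt. A minimal sketch of what they presumably compute, assuming they are static inlines over the HPT info structure (the type name kvm_hpt_info is inferred from the &kvm->arch.hpt argument) that re-derive both values from the retained order field exactly as the removed assignments did:

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}

Since kvm->arch.hpt.order is still stored just above, dropping the cached npte and mask fields loses no information; the later hunks simply swap the field reads for these calls.
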
@@ -196,8 +192,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
 	/* Can't use more than 1 HPTE per HPTEG */
-	if (npages > kvm->arch.hpt.mask + 1)
-		npages = kvm->arch.hpt.mask + 1;
+	if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
+		npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;
 
 	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
@@ -207,7 +203,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
-		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt.mask;
+		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25)))
+			& kvmppc_hpt_mask(&kvm->arch.hpt);
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage. Since we create
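
For scale, assuming the accessors keep the arithmetic of the assignments removed in the first hunk: an order-24 HPT is 16 MiB and holds 1ul << 20 HPTEs grouped into 1ul << 17 HPTEGs, so kvmppc_hpt_mask() returns 0x1ffff. The VRMA setup above is then capped at 131072 bolted entries (one per HPTEG, per the comment), and each computed hash is wrapped into the same 0..0x1ffff range of HPTEG indexes.
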
@@ -1327,7 +1324,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
 
		/* Skip uninteresting entries, i.e. clean on not-first pass */
		if (!first_pass) {
-			while (i < kvm->arch.hpt.npte &&
+			while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
			       !hpte_dirty(revp, hptp)) {
				++i;
				hptp += 2;
@@ -1337,7 +1334,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
		hdr.index = i;
 
		/* Grab a series of valid entries */
-		while (i < kvm->arch.hpt.npte &&
+		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
		       hdr.n_valid < 0xffff &&
		       nb + HPTE_SIZE < count &&
		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
@@ -1353,7 +1350,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
			++revp;
		}
		/* Now skip invalid entries while we can */
-		while (i < kvm->arch.hpt.npte &&
+		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
		       hdr.n_invalid < 0xffff &&
		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
			/* found an invalid entry */
@@ -1374,7 +1371,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
		}
 
		/* Check if we've wrapped around the hash table */
-		if (i >= kvm->arch.hpt.npte) {
+		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
			i = 0;
			ctx->first_pass = 0;
			break;
@@ -1433,8 +1430,8 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 
		err = -EINVAL;
		i = hdr.index;
-		if (i >= kvm->arch.hpt.npte ||
-		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt.npte)
+		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) ||
+		    i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt))
			break;
 
		hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
@@ -1625,7 +1622,8 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
 	kvm = p->kvm;
 	i = p->hpt_index;
 	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
-	for (; len != 0 && i < kvm->arch.hpt.npte; ++i, hptp += 2) {
+	for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt);
+	     ++i, hptp += 2) {
		if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
			continue;
 